import logging

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typeguard import check_argument_types

from funasr.modules.streaming_utils.chunk_utilis import overlap_chunk
from funasr.torch_utils.device_funcs import to_device
from funasr.modules.nets_utils import make_pad_mask
from funasr.modules.attention import (
    MultiHeadedAttention,
    MultiHeadedAttentionSANM,
    MultiHeadedAttentionSANMwithMask,
)
from funasr.modules.embedding import SinusoidalPositionEncoder, StreamSinusoidalPositionEncoder

        # (tail of forward(); the body above is truncated in this excerpt)
        if len(intermediate_outs) > 0:
            return (xs_pad, intermediate_outs), olens, None
        return xs_pad, olens, None

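    # For reference (not part of the original file): forward() follows the
    # ESPnet-style encoder contract of returning (output, output_lengths, None);
    # when intermediate CTC branches are collected, the output slot carries the
    # tuple (xs_pad, intermediate_outs) instead of xs_pad alone.
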
    def _add_overlap_chunk(self, feats: torch.Tensor, cache: dict = {}):
        if len(cache) == 0:
            return feats
        # Splice the cached tail of the previous chunk onto the current one so
        # that adjacent chunks overlap along the time axis.
        cache["feats"] = to_device(cache["feats"], device=feats.device)
        overlap_feats = torch.cat((cache["feats"], feats), dim=1)
        if cache["is_final"]:
            cache["feats"] = overlap_feats[:, -cache["chunk_size"][0]:, :]
            if not cache["last_chunk"]:
                # Right-pad the time axis so the final chunk reaches a full
                # sum(chunk_size) frames.
                padding_length = sum(cache["chunk_size"]) - overlap_feats.shape[1]
                overlap_feats = overlap_feats.transpose(1, 2)
                overlap_feats = F.pad(overlap_feats, (0, padding_length))
                overlap_feats = overlap_feats.transpose(1, 2)
        else:
            # Cache the trailing look-back + look-ahead frames for the next call.
            cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
        return overlap_feats
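
    # Illustrative sketch (not part of the original file): the cache layout that
    # _add_overlap_chunk expects. Field names mirror their use above; the
    # concrete sizes are hypothetical.
    #
    #     cache = {
    #         "feats": prev_tail,          # (batch, time, dim) tail of the previous chunk
    #         "chunk_size": [5, 10, 5],    # hypothetical [look-back, chunk, look-ahead] frame counts
    #         "is_final": False,           # True once the last audio chunk has arrived
    #         "last_chunk": False,         # True for the final (possibly padded) chunk
    #     }
    #
    # While streaming (is_final=False), the call concatenates prev_tail with the
    # new feats along time and re-caches the trailing
    # chunk_size[0] + chunk_size[2] frames for the next chunk.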

    def forward_chunk(self,
                      xs_pad: torch.Tensor,
                      ilens: torch.Tensor,
                      cache: dict = None,
                      ):
        # (the `cache` parameter is reconstructed from its use below; the
        # excerpt truncates the full signature here)
        # Apply the positional-encoding front-end when one is configured.
        if self.embed is not None:
            xs_pad = self.embed(xs_pad, cache)

        if cache["tail_chunk"]:
            # The tail chunk reuses the cached features as its input.
            xs_pad = cache["feats"]
        else:
            xs_pad = self._add_overlap_chunk(xs_pad, cache)
        # encoders0 is the leading encoder stack; the remaining positional
        # arguments are unused here and passed as None.
        encoder_outs = self.encoders0(xs_pad, None, None, None, None)
        xs_pad, masks = encoder_outs[0], encoder_outs[1]
        intermediate_outs = []
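
        # Illustrative sketch (not part of the original file): a hypothetical
        # per-chunk driver loop. `encoder`, `audio_stream`, `init_tail`, and
        # `ilens` are assumed names; the cache fields mirror forward_chunk above.
        #
        #     cache = {"feats": init_tail, "chunk_size": [5, 10, 5],
        #              "is_final": False, "last_chunk": False, "tail_chunk": False}
        #     for chunk, is_final in audio_stream:
        #         cache["is_final"] = is_final
        #         out = encoder.forward_chunk(chunk, ilens, cache=cache)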