From bf4b3ef9cb95acaa2b92b98f236c4f3228cdbc2d Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Thu, 21 Sep 2023 16:30:43 +0800
Subject: [PATCH] Merge pull request #976 from alibaba-damo-academy/dev_lhn
---
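Notes: this patch only deletes a commented-out draft of forward_chunk; the
live method with the same signature begins immediately after the removed
block, so no behavior changes.

For reference, below is a minimal sketch of the call pattern implied by that
signature and by the cache keys visible in the surrounding code
("chunk_size", "feats", "tail_chunk"). The cache values, tensor shapes, and
the encoder instance are illustrative assumptions inferred from this file,
not a documented FunASR API.

    import torch

    # Cache layout inferred from the keys referenced in sanm_encoder.py;
    # the concrete values are placeholder assumptions. In practice the
    # model code initializes this cache (e.g. "feats" as a tensor).
    cache = {
        "chunk_size": [5, 10, 5],  # assumed three-element layout; indices 0
                                   # and 2 are summed in _add_overlap_chunk
        "feats": None,             # overlap features, maintained by
                                   # _add_overlap_chunk between chunks
        "tail_chunk": False,       # True only for the final padded chunk
    }

    xs_pad = torch.randn(1, 10, 560)  # (batch, frames, feat_dim), shapes assumed
    ilens = torch.tensor([10])

    # With a constructed encoder instance (instantiation omitted here),
    # the streaming call would be:
    #     xs_out, olens, _ = encoder.forward_chunk(xs_pad, ilens, cache=cache)
    # Per the deleted draft, forward_chunk returns (xs_pad, ilens, None), or
    # ((xs_pad, intermediate_outs), None, None) when intermediate CTC layers
    # are configured.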
funasr/models/encoder/sanm_encoder.py | 46 ----------------------------------------------
 1 file changed, 0 insertions(+), 46 deletions(-)
diff --git a/funasr/models/encoder/sanm_encoder.py b/funasr/models/encoder/sanm_encoder.py
index e04b9e7..c15343e 100644
--- a/funasr/models/encoder/sanm_encoder.py
+++ b/funasr/models/encoder/sanm_encoder.py
@@ -873,52 +873,6 @@
cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
return overlap_feats
- #def forward_chunk(self,
- # xs_pad: torch.Tensor,
- # ilens: torch.Tensor,
- # cache: dict = None,
- # ctc: CTC = None,
- # ):
- # xs_pad *= self.output_size() ** 0.5
- # if self.embed is None:
- # xs_pad = xs_pad
- # else:
- # xs_pad = self.embed(xs_pad, cache)
- # if cache["tail_chunk"]:
- # xs_pad = to_device(cache["feats"], device=xs_pad.device)
- # else:
- # xs_pad = self._add_overlap_chunk(xs_pad, cache)
- # encoder_outs = self.encoders0(xs_pad, None, None, None, None)
- # xs_pad, masks = encoder_outs[0], encoder_outs[1]
- # intermediate_outs = []
- # if len(self.interctc_layer_idx) == 0:
- # encoder_outs = self.encoders(xs_pad, None, None, None, None)
- # xs_pad, masks = encoder_outs[0], encoder_outs[1]
- # else:
- # for layer_idx, encoder_layer in enumerate(self.encoders):
- # encoder_outs = encoder_layer(xs_pad, None, None, None, None)
- # xs_pad, masks = encoder_outs[0], encoder_outs[1]
- # if layer_idx + 1 in self.interctc_layer_idx:
- # encoder_out = xs_pad
-
- # # intermediate outputs are also normalized
- # if self.normalize_before:
- # encoder_out = self.after_norm(encoder_out)
-
- # intermediate_outs.append((layer_idx + 1, encoder_out))
-
- # if self.interctc_use_conditioning:
- # ctc_out = ctc.softmax(encoder_out)
- # xs_pad = xs_pad + self.conditioning_layer(ctc_out)
-
- # if self.normalize_before:
- # xs_pad = self.after_norm(xs_pad)
-
- # if len(intermediate_outs) > 0:
- # return (xs_pad, intermediate_outs), None, None
- # return xs_pad, ilens, None
-
-
def forward_chunk(self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
--
Gitblit v1.9.1