From 4ace5a95b052d338947fc88809a440ccd55cf6b4 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 16 Nov 2023 16:39:52 +0800
Subject: [PATCH] sanm_encoder: fix streaming forward_chunk cache handling and drop dead code

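Drop the dead, fully commented-out forward_chunk variant and fix two
issues in the streaming forward_chunk path:

* Write each layer's streaming state back to
  new_cache[layer_idx + len(self.encoders0)], the same slot the layer
  reads from. The old new_cache[layer_idx + 1] index only lined up when
  len(self.encoders0) == 1; for any longer encoders0 prefix, reads and
  writes targeted different cache slots.
* Persist cache["opt"] when encoder_chunk_look_back == -1 as well,
  not only when it is positive.

A minimal, self-contained sketch of the indexing mismatch (hypothetical
sizes, not the real encoder configuration):

    # The assertion below only holds when n_encoders0 == 1; for any
    # other prefix length the old code read one slot and wrote another.
    n_encoders0 = 1                          # slots 0..n_encoders0-1 hold encoders0 state
    new_cache = [None] * (n_encoders0 + 3)   # plus 3 layers in self.encoders
    for layer_idx in range(3):
        read_slot = layer_idx + n_encoders0  # slot forward_chunk reads
        old_write_slot = layer_idx + 1       # slot the old code wrote to
        assert read_slot == old_write_slot
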
---
 funasr/models/encoder/sanm_encoder.py |   50 ++------------------------------------------------
 1 file changed, 2 insertions(+), 48 deletions(-)

diff --git a/funasr/models/encoder/sanm_encoder.py b/funasr/models/encoder/sanm_encoder.py
index ac4240c..c15343e 100644
--- a/funasr/models/encoder/sanm_encoder.py
+++ b/funasr/models/encoder/sanm_encoder.py
@@ -873,52 +873,6 @@
         cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
         return overlap_feats
 
-    #def forward_chunk(self,
-    #                  xs_pad: torch.Tensor,
-    #                  ilens: torch.Tensor,
-    #                  cache: dict = None,
-    #                  ctc: CTC = None,
-    #                  ):
-    #    xs_pad *= self.output_size() ** 0.5
-    #    if self.embed is None:
-    #        xs_pad = xs_pad
-    #    else:
-    #        xs_pad = self.embed(xs_pad, cache)
-    #    if cache["tail_chunk"]:
-    #        xs_pad = to_device(cache["feats"], device=xs_pad.device)
-    #    else:
-    #        xs_pad = self._add_overlap_chunk(xs_pad, cache)
-    #    encoder_outs = self.encoders0(xs_pad, None, None, None, None)
-    #    xs_pad, masks = encoder_outs[0], encoder_outs[1]
-    #    intermediate_outs = []
-    #    if len(self.interctc_layer_idx) == 0:
-    #        encoder_outs = self.encoders(xs_pad, None, None, None, None)
-    #        xs_pad, masks = encoder_outs[0], encoder_outs[1]
-    #    else:
-    #        for layer_idx, encoder_layer in enumerate(self.encoders):
-    #            encoder_outs = encoder_layer(xs_pad, None, None, None, None)
-    #            xs_pad, masks = encoder_outs[0], encoder_outs[1]
-    #            if layer_idx + 1 in self.interctc_layer_idx:
-    #                encoder_out = xs_pad
-
-    #                # intermediate outputs are also normalized
-    #                if self.normalize_before:
-    #                    encoder_out = self.after_norm(encoder_out)
-
-    #                intermediate_outs.append((layer_idx + 1, encoder_out))
-
-    #                if self.interctc_use_conditioning:
-    #                    ctc_out = ctc.softmax(encoder_out)
-    #                    xs_pad = xs_pad + self.conditioning_layer(ctc_out)
-
-    #    if self.normalize_before:
-    #        xs_pad = self.after_norm(xs_pad)
-
-    #    if len(intermediate_outs) > 0:
-    #        return (xs_pad, intermediate_outs), None, None
-    #    return xs_pad, ilens, None
-
-
     def forward_chunk(self,
                       xs_pad: torch.Tensor,
                       ilens: torch.Tensor,
@@ -945,11 +899,11 @@
 
         for layer_idx, encoder_layer in enumerate(self.encoders):
             encoder_outs = encoder_layer.forward_chunk(xs_pad, new_cache[layer_idx+len(self.encoders0)], cache["chunk_size"], cache["encoder_chunk_look_back"])
-            xs_pad, new_cache[layer_idx+1] = encoder_outs[0], encoder_outs[1]
+            xs_pad, new_cache[layer_idx+len(self.encoders0)] = encoder_outs[0], encoder_outs[1]
 
         if self.normalize_before:
             xs_pad = self.after_norm(xs_pad)
-        if cache["encoder_chunk_look_back"] > 0:
+        if cache["encoder_chunk_look_back"] > 0 or cache["encoder_chunk_look_back"] == -1:
             cache["opt"] = new_cache
 
         return xs_pad, ilens, None

--
Gitblit v1.9.1