From 3cd3473bf7a3b41484baa86d9092248d78e7af39 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 21 Apr 2023 17:17:37 +0800
Subject: [PATCH] Add token_list arg and collect_feats to TimestampPredictor

---
 funasr/models/e2e_tp.py |   27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/funasr/models/e2e_tp.py b/funasr/models/e2e_tp.py
index 8808008..d1367ab 100644
--- a/funasr/models/e2e_tp.py
+++ b/funasr/models/e2e_tp.py
@@ -32,7 +32,7 @@
 
 class TimestampPredictor(AbsESPnetModel):
     """
-    Author: Speech Lab, Alibaba Group, China
+    Author: Speech Lab of DAMO Academy, Alibaba Group
     """
 
     def __init__(
@@ -41,6 +41,7 @@
             encoder: AbsEncoder,
             predictor: CifPredictorV3,
             predictor_bias: int = 0,
+            token_list=None,
     ):
         assert check_argument_types()
 
@@ -54,6 +55,7 @@
         self.predictor = predictor
         self.predictor_bias = predictor_bias
         self.criterion_pre = mae_loss()
+        self.token_list = token_list
     
     def forward(
             self,
@@ -148,7 +150,26 @@
     def calc_predictor_timestamp(self, encoder_out, encoder_out_lens, token_num):
         encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
             encoder_out.device)
-        ds_alphas, ds_cif_peak, us_alphas, us_cif_peak = self.predictor.get_upsample_timestamp(encoder_out,
+        ds_alphas, ds_cif_peak, us_alphas, us_peaks = self.predictor.get_upsample_timestamp(encoder_out,
                                                                                                encoder_out_mask,
                                                                                                token_num)
-        return ds_alphas, ds_cif_peak, us_alphas, us_cif_peak
+        return ds_alphas, ds_cif_peak, us_alphas, us_peaks
+
+    def collect_feats(
+            self,
+            speech: torch.Tensor,
+            speech_lengths: torch.Tensor,
+            text: torch.Tensor,
+            text_lengths: torch.Tensor,
+    ) -> Dict[str, torch.Tensor]:
+        if self.extract_feats_in_collect_stats:
+            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
+        else:
+            # Generate dummy stats if extract_feats_in_collect_stats is False
+            logging.warning(
+                "Generating dummy stats for feats and feats_lengths, "
+                "because encoder_conf.extract_feats_in_collect_stats is "
+                f"{self.extract_feats_in_collect_stats}"
+            )
+            feats, feats_lengths = speech, speech_lengths
+        return {"feats": feats, "feats_lengths": feats_lengths}

--
Gitblit v1.9.1
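
Usage note (not part of the patch): a minimal sketch of how the interfaces touched by this diff might be driven. It assumes funasr is installed and that a TimestampPredictor instance has already been built (encoder + CifPredictorV3); the variable `model`, the helper name `demo_timestamp_call`, the `model.encode(...)` call, and the tensor shapes are illustrative assumptions, not code taken from this diff.

```python
# Sketch only: `model` is assumed to be an already-constructed TimestampPredictor,
# e.g. restored from a checkpoint by the usual FunASR build/inference tooling.
import torch


def demo_timestamp_call(
    model,
    speech: torch.Tensor,          # assumed shape: (batch, samples) or (batch, frames, feat_dim)
    speech_lengths: torch.Tensor,  # assumed shape: (batch,)
    text: torch.Tensor,            # assumed shape: (batch, token_len)
    text_lengths: torch.Tensor,    # assumed shape: (batch,)
):
    # collect_feats (added in this patch) returns the front-end features used for
    # stats collection, or the raw speech as dummy stats when
    # extract_feats_in_collect_stats is False.
    stats = model.collect_feats(speech, speech_lengths, text, text_lengths)
    feats, feats_lengths = stats["feats"], stats["feats_lengths"]

    # Assumption: the class exposes an encode() helper like other FunASR models,
    # returning (encoder_out, encoder_out_lens).
    encoder_out, encoder_out_lens = model.encode(speech, speech_lengths)

    # predictor_bias comes from __init__ (see the first hunk); the CIF predictor
    # up-samples alphas and peaks, from which token-level timestamps are derived
    # (the last return value is named us_peaks after this patch).
    token_num = text_lengths + model.predictor_bias
    ds_alphas, ds_cif_peak, us_alphas, us_peaks = model.calc_predictor_timestamp(
        encoder_out, encoder_out_lens, token_num
    )
    return us_alphas, us_peaks
```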