From 0cf5dfec2c8313fc2ed2aab8d10bf3dc4b9c283f Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Thu, 14 Mar 2024 14:41:49 +0800
Subject: [PATCH] seaco_paraformer: refactor model.py (imports, ASF filter, predictor, hotword encoder)

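Refactor SeACo-Paraformer (funasr/models/seaco_paraformer/model.py):
drop unused imports, rename dha_pad to seaco_label_pad, restore
calc_predictor, gate timestamp prediction on CifPredictorV3, switch the
ASF filter to seaco_decoder.forward_asf6, replace the hardcoded 8377
with self.NO_BIAS, feed packed sequences through the bias-encoder LSTM,
and reuse a single DatadirWriter across inference calls.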
---
 funasr/models/seaco_paraformer/model.py |  153 +++++++++++++++++++++++++-------------------------
 1 file changed, 77 insertions(+), 76 deletions(-)

diff --git a/funasr/models/seaco_paraformer/model.py b/funasr/models/seaco_paraformer/model.py
index 1867bbf..92fc989 100644
--- a/funasr/models/seaco_paraformer/model.py
+++ b/funasr/models/seaco_paraformer/model.py
@@ -19,16 +19,14 @@
 
 from funasr.register import tables
 from funasr.utils import postprocess_utils
-from funasr.metrics.compute_acc import th_accuracy
 from funasr.models.paraformer.model import Paraformer
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.models.paraformer.search import Hypothesis
-from funasr.models.paraformer.cif_predictor import mae_loss
 from funasr.train_utils.device_funcs import force_gatherable
 from funasr.models.bicif_paraformer.model import BiCifParaformer
 from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
-from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
 from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
+from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
 from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
 from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
 
@@ -66,7 +64,6 @@
   
         # bias encoder
         if self.bias_encoder_type == 'lstm':
-            logging.warning("enable bias encoder sampling and contextual training")
             self.bias_encoder = torch.nn.LSTM(self.inner_dim, 
                                               self.inner_dim, 
                                               2, 
@@ -77,9 +74,8 @@
                 self.lstm_proj = torch.nn.Linear(self.inner_dim*2, self.inner_dim)
             else:
                 self.lstm_proj = None
-            self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
+            # self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
         elif self.bias_encoder_type == 'mean':
-            logging.warning("enable bias encoder sampling and contextual training")
             self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
         else:
             logging.error("Unsupport bias encoder type: {}".format(self.bias_encoder_type))
@@ -103,6 +99,7 @@
         )
         self.train_decoder = kwargs.get("train_decoder", False)
         self.NO_BIAS = kwargs.get("NO_BIAS", 8377)
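+        # record the configured predictor class name; timestamp prediction at inference requires CifPredictorV3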
+        self.predictor_name = kwargs.get("predictor")
         
     def forward(
         self,
@@ -131,10 +128,9 @@
     
         hotword_pad = kwargs.get("hotword_pad")
         hotword_lengths = kwargs.get("hotword_lengths")
-        dha_pad = kwargs.get("dha_pad")
-
+        seaco_label_pad = kwargs.get("seaco_label_pad")
+
         batch_size = speech.shape[0]
-        self.step_cur += 1
         # for data-parallel
         text = text[:, : text_lengths.max()]
         speech = speech[:, :speech_lengths.max()]
@@ -152,7 +148,7 @@
                                         ys_lengths, 
                                         hotword_pad, 
                                         hotword_lengths, 
-                                        dha_pad,
+                                        seaco_label_pad,
                                         )
         if self.train_decoder:
             loss_att, acc_att = self._calc_att_loss(
@@ -175,6 +171,12 @@
     def _merge(self, cif_attended, dec_attended):
         return cif_attended + dec_attended
     
+    def calc_predictor(self, encoder_out, encoder_out_lens):
+        encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
+            encoder_out.device)
+        predictor_outs = self.predictor(encoder_out, None, encoder_out_mask, ignore_id=self.ignore_id)
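+        # keep only the first four outputs: pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index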
+        return predictor_outs[:4]
+    
     def _calc_seaco_loss(
             self,
             encoder_out: torch.Tensor,
@@ -183,7 +185,7 @@
             ys_lengths: torch.Tensor,
             hotword_pad: torch.Tensor,
             hotword_lengths: torch.Tensor,
-            dha_pad: torch.Tensor,
+            seaco_label_pad: torch.Tensor,
     ):  
         # predictor forward
         encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
@@ -202,7 +204,7 @@
         dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_out, ys_lengths)
         merged = self._merge(cif_attended, dec_attended)
         dha_output = self.hotword_output_layer(merged[:, :-1])  # remove the last token in loss calculation
-        loss_att = self.criterion_seaco(dha_output, dha_pad)
+        loss_att = self.criterion_seaco(dha_output, seaco_label_pad)
         return loss_att
 
     def _seaco_decode_with_ASF(self, 
@@ -214,25 +216,24 @@
                                nfilter=50,
                                seaco_weight=1.0):
         # decoder forward
+
         decoder_out, decoder_hidden, _ = self.decoder(encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, return_hidden=True, return_both=True)
+
         decoder_pred = torch.log_softmax(decoder_out, dim=-1)
         if hw_list is not None:
             hw_lengths = [len(i) for i in hw_list]
             hw_list_ = [torch.Tensor(i).long() for i in hw_list]
             hw_list_pad = pad_list(hw_list_, 0).to(encoder_out.device)
             selected = self._hotword_representation(hw_list_pad, torch.Tensor(hw_lengths).int().to(encoder_out.device))
+
             contextual_info = selected.squeeze(0).repeat(encoder_out.shape[0], 1, 1).to(encoder_out.device)
             num_hot_word = contextual_info.shape[1]
             _contextual_length = torch.Tensor([num_hot_word]).int().repeat(encoder_out.shape[0]).to(encoder_out.device)
-            
+
             # ASF Core
             if nfilter > 0 and nfilter < num_hot_word:
-                for dec in self.seaco_decoder.decoders:
-                    dec.reserve_attn = True
-                # cif_attended, _ = self.decoder2(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
-                dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
-                # cif_filter = torch.topk(self.decoder2.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1], min(nfilter, num_hot_word-1))[1].tolist()
-                hotword_scores = self.seaco_decoder.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1]
+                hotword_scores = self.seaco_decoder.forward_asf6(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
+                hotword_scores = hotword_scores[0].sum(0).sum(0)
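+                # total cross-attention mass per hotword; only the top-nfilter candidates are kept below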
                 # hotword_scores /= torch.sqrt(torch.tensor(hw_lengths)[:-1].float()).to(hotword_scores.device)
                 dec_filter = torch.topk(hotword_scores, min(nfilter, num_hot_word-1))[1].tolist()
                 add_filter = dec_filter
@@ -243,21 +244,18 @@
                 contextual_info = selected.squeeze(0).repeat(encoder_out.shape[0], 1, 1).to(encoder_out.device)
                 num_hot_word = contextual_info.shape[1]
                 _contextual_length = torch.Tensor([num_hot_word]).int().repeat(encoder_out.shape[0]).to(encoder_out.device)
-                for dec in self.seaco_decoder.decoders:
-                    dec.attn_mat = []
-                    dec.reserve_attn = False
             
             # SeACo Core
             cif_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
             dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
             merged = self._merge(cif_attended, dec_attended)
-            
+
             dha_output = self.hotword_output_layer(merged)  # remove the last token in loss calculation
             dha_pred = torch.log_softmax(dha_output, dim=-1)
             def _merge_res(dec_output, dha_output):
                 lmbd = torch.Tensor([seaco_weight] * dha_output.shape[0])
                 dha_ids = dha_output.max(-1)[-1]# [0]
-                dha_mask = (dha_ids == 8377).int().unsqueeze(-1)
+                dha_mask = (dha_ids == self.NO_BIAS).int().unsqueeze(-1)
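+                # frames where the hotword branch predicts NO_BIAS fall back to the plain decoder scores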
                 a = (1 - lmbd) / lmbd
                 b = 1 / lmbd
                 a, b = a.to(dec_output.device), b.to(dec_output.device)
@@ -265,8 +263,8 @@
                 # logits = dec_output * dha_mask + dha_output[:,:,:-1] * (1-dha_mask)
                 logits = dec_output * dha_mask + dha_output[:,:,:] * (1-dha_mask)
                 return logits
+
             merged_pred = _merge_res(decoder_pred, dha_pred)
-            # import pdb; pdb.set_trace()
             return merged_pred
         else:
             return decoder_pred
@@ -276,6 +274,8 @@
                                 hotword_lengths):
         if self.bias_encoder_type != 'lstm':
             logging.error("Unsupported bias encoder type")
+
+        '''
         hw_embed = self.decoder.embed(hotword_pad)
         hw_embed, (_, _) = self.bias_encoder(hw_embed)
         if self.lstm_proj is not None:
@@ -283,26 +283,20 @@
         _ind = np.arange(0, hw_embed.shape[0]).tolist()
         selected = hw_embed[_ind, [i-1 for i in hotword_lengths.detach().cpu().tolist()]]
         return selected
+        '''
 
-    '''
-     def calc_predictor(self, encoder_out, encoder_out_lens):
-        encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
-            encoder_out.device)
-        pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index, pre_token_length2 = self.predictor(encoder_out,
-                                                                                                          None,
-                                                                                                          encoder_out_mask,
-                                                                                                          ignore_id=self.ignore_id)
-        return pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index
-
-
-    def calc_predictor_timestamp(self, encoder_out, encoder_out_lens, token_num):
-        encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
-            encoder_out.device)
-        ds_alphas, ds_cif_peak, us_alphas, us_peaks = self.predictor.get_upsample_timestamp(encoder_out,
-                                                                                            encoder_out_mask,
-                                                                                            token_num)
-        return ds_alphas, ds_cif_peak, us_alphas, us_peaks
-    '''        
+        # hw_embed = self.sac_embedding(hotword_pad)
+        hw_embed = self.decoder.embed(hotword_pad)
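+        # pack the padded hotword embeddings so the LSTM only processes real tokens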
+        hw_embed = torch.nn.utils.rnn.pack_padded_sequence(hw_embed, hotword_lengths.cpu().type(torch.int64), batch_first=True, enforce_sorted=False)
+        packed_rnn_output, _ = self.bias_encoder(hw_embed)
+        rnn_output = torch.nn.utils.rnn.pad_packed_sequence(packed_rnn_output, batch_first=True)[0]
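+        # unpack to a padded (batch, time, dim) tensor before the optional projection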
+        if self.lstm_proj is not None:
+            hw_hidden = self.lstm_proj(rnn_output)
+        else:
+            hw_hidden = rnn_output
+        _ind = np.arange(0, hw_hidden.shape[0]).tolist()
+        selected = hw_hidden[_ind, [i-1 for i in hotword_lengths.detach().cpu().tolist()]]
+        return selected
   
     def inference(self,
                  data_in,
@@ -320,7 +314,6 @@
             logging.info("enable beam_search")
             self.init_beam_search(**kwargs)
             self.nbest = kwargs.get("nbest", 1)
-        
         meta_data = {}
         
         # extract fbank feats
@@ -337,7 +330,7 @@
         
         speech = speech.to(device=kwargs["device"])
         speech_lengths = speech_lengths.to(device=kwargs["device"])
-
+
         # hotword
         self.hotword_list = self.generate_hotwords_list(kwargs.get("hotword", None), tokenizer=tokenizer, frontend=frontend)
         
@@ -348,21 +341,26 @@
         
         # predictor
         predictor_outs = self.calc_predictor(encoder_out, encoder_out_lens)
-        pre_acoustic_embeds, pre_token_length, _, _ = predictor_outs[0], predictor_outs[1], \
-                                                                        predictor_outs[2], predictor_outs[3]
+        pre_acoustic_embeds, pre_token_length = predictor_outs[0], predictor_outs[1]
         pre_token_length = pre_token_length.round().long()
         if torch.max(pre_token_length) < 1:
             return []
 
+        decoder_out = self._seaco_decode_with_ASF(encoder_out, 
+                                                  encoder_out_lens,
+                                                  pre_acoustic_embeds,
+                                                  pre_token_length,
+                                                  hw_list=self.hotword_list
+                                                  )
 
-        decoder_out = self._seaco_decode_with_ASF(encoder_out, encoder_out_lens,
-                                                   pre_acoustic_embeds,
-                                                   pre_token_length,
-                                                   hw_list=self.hotword_list)
         # decoder_out, _ = decoder_outs[0], decoder_outs[1]
-        _, _, us_alphas, us_peaks = self.calc_predictor_timestamp(encoder_out, encoder_out_lens,
-                                                                  pre_token_length)
-        
+        if self.predictor_name == "CifPredictorV3":
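+            # only CifPredictorV3 exposes the upsampled alphas/peaks needed for timestamps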
+            _, _, us_alphas, us_peaks = self.calc_predictor_timestamp(encoder_out, 
+                                                                      encoder_out_lens,
+                                                                      pre_token_length)
+        else:
+            us_alphas = None
+            us_peaks = None
+
         results = []
         b, n, d = decoder_out.size()
         for i in range(b):
@@ -387,9 +385,11 @@
                 nbest_hyps = [Hypothesis(yseq=yseq, score=score)]
             for nbest_idx, hyp in enumerate(nbest_hyps):
                 ibest_writer = None
-                if ibest_writer is None and kwargs.get("output_dir") is not None:
-                    writer = DatadirWriter(kwargs.get("output_dir"))
-                    ibest_writer = writer[f"{nbest_idx + 1}best_recog"]
+                if kwargs.get("output_dir") is not None:
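+                    # cache a single DatadirWriter on the model so repeated calls append to output_dir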
+                    if not hasattr(self, "writer"):
+                        self.writer = DatadirWriter(kwargs.get("output_dir"))
+                    ibest_writer = self.writer[f"{nbest_idx + 1}best_recog"]
+
                 # remove sos/eos and get results
                 last_pos = -1
                 if isinstance(hyp.yseq, list):
@@ -405,24 +405,25 @@
                     # Change integer-ids to tokens
                     token = tokenizer.ids2tokens(token_int)
                     text = tokenizer.tokens2text(token)
-                    
-                    _, timestamp = ts_prediction_lfr6_standard(us_alphas[i][:encoder_out_lens[i] * 3],
-                                                               us_peaks[i][:encoder_out_lens[i] * 3],
-                                                               copy.copy(token),
-                                                               vad_offset=kwargs.get("begin_time", 0))
-                    
-                    text_postprocessed, time_stamp_postprocessed, word_lists = postprocess_utils.sentence_postprocess(
-                        token, timestamp)
-
-                    result_i = {"key": key[i], "text": text_postprocessed,
-                                "timestamp": time_stamp_postprocessed,
-                                }
-                    
-                    if ibest_writer is not None:
-                        ibest_writer["token"][key[i]] = " ".join(token)
-                        # ibest_writer["text"][key[i]] = text
-                        ibest_writer["timestamp"][key[i]] = time_stamp_postprocessed
-                        ibest_writer["text"][key[i]] = text_postprocessed
+                    if us_alphas is not None:
+                        _, timestamp = ts_prediction_lfr6_standard(us_alphas[i][:encoder_out_lens[i] * 3],
+                                                                   us_peaks[i][:encoder_out_lens[i] * 3],
+                                                                   copy.copy(token),
+                                                                   vad_offset=kwargs.get("begin_time", 0))
+                        text_postprocessed, time_stamp_postprocessed, _ = \
+                            postprocess_utils.sentence_postprocess(token, timestamp)
+                        result_i = {"key": key[i], "text": text_postprocessed,
+                                    "timestamp": time_stamp_postprocessed}
+                        if ibest_writer is not None:
+                            ibest_writer["token"][key[i]] = " ".join(token)
+                            ibest_writer["timestamp"][key[i]] = time_stamp_postprocessed
+                            ibest_writer["text"][key[i]] = text_postprocessed
+                    else:
+                        text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
+                        result_i = {"key": key[i], "text": text_postprocessed}
+                        if ibest_writer is not None:
+                            ibest_writer["token"][key[i]] = " ".join(token)
+                            ibest_writer["text"][key[i]] = text_postprocessed
                 else:
                     result_i = {"key": key[i], "token_int": token_int}
                 results.append(result_i)

--
Gitblit v1.9.1