From 543d900522403eccb4e387cbc41c5dce24091d1d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 22 Feb 2024 23:53:10 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR

---
 funasr/models/paraformer/decoder.py     |   56 ++++++++++++++++++++++++++++
 funasr/models/seaco_paraformer/model.py |   55 +++++++++++++----------------
 2 files changed, 80 insertions(+), 31 deletions(-)

diff --git a/funasr/models/paraformer/decoder.py b/funasr/models/paraformer/decoder.py
index 68018a0..ad321e4 100644
--- a/funasr/models/paraformer/decoder.py
+++ b/funasr/models/paraformer/decoder.py
@@ -116,6 +116,23 @@
             # x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
 
         return x, tgt_mask, memory, memory_mask, cache
+
+    def get_attn_mat(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
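+        """Run this layer's feed-forward and self-attention, then return the source-attention weight matrix."""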
+        residual = tgt
+        tgt = self.norm1(tgt)
+        tgt = self.feed_forward(tgt)
+
+        x = tgt
+        if self.self_attn is not None:
+            tgt = self.norm2(tgt)
+            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+            x = residual + x
+
+        residual = x
+        x = self.norm3(x)
+        x_src_attn, attn_mat = self.src_attn(x, memory, memory_mask, ret_attn=True)
+        return attn_mat
 
     def forward_one_step(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
         """Compute decoded features.
@@ -396,6 +413,45 @@
             ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
         )
         return logp.squeeze(0), state
+
+    def forward_asf2(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
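+        """Run the first decoder layer and return the source-attention matrix from the second (ASF hotword filtering)."""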
+        tgt = ys_in_pad
+        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[0](tgt, tgt_mask, memory, memory_mask)
+        attn_mat = self.decoders[1].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
+        return attn_mat
+
+    def forward_asf6(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
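+        """Run the first five decoder layers and return the source-attention matrix from the sixth (assumes a 6-block decoder; ASF hotword filtering)."""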
+        tgt = ys_in_pad
+        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        # pass through the first five decoder layers, then take the
+        # source-attention matrix from the sixth
+        for layer in self.decoders[:5]:
+            tgt, tgt_mask, memory, memory_mask, _ = layer(tgt, tgt_mask, memory, memory_mask)
+        attn_mat = self.decoders[5].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
+        return attn_mat
 
     def forward_chunk(
         self,
diff --git a/funasr/models/seaco_paraformer/model.py b/funasr/models/seaco_paraformer/model.py
index caf2b15..cfdd26a 100644
--- a/funasr/models/seaco_paraformer/model.py
+++ b/funasr/models/seaco_paraformer/model.py
@@ -19,11 +19,9 @@
 
 from funasr.register import tables
 from funasr.utils import postprocess_utils
-from funasr.metrics.compute_acc import th_accuracy
 from funasr.models.paraformer.model import Paraformer
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.models.paraformer.search import Hypothesis
-from funasr.models.paraformer.cif_predictor import mae_loss
 from funasr.train_utils.device_funcs import force_gatherable
 from funasr.models.bicif_paraformer.model import BiCifParaformer
 from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
@@ -76,7 +74,7 @@
                 self.lstm_proj = torch.nn.Linear(self.inner_dim*2, self.inner_dim)
             else:
                 self.lstm_proj = None
-            self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
+            # self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
         elif self.bias_encoder_type == 'mean':
             self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
         else:
@@ -225,12 +223,9 @@
             
             # ASF Core
             if nfilter > 0 and nfilter < num_hot_word:
-                for dec in self.seaco_decoder.decoders:
-                    dec.reserve_attn = True
-                # cif_attended, _ = self.decoder2(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
-                dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
-                # cif_filter = torch.topk(self.decoder2.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1], min(nfilter, num_hot_word-1))[1].tolist()
-                hotword_scores = self.seaco_decoder.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1]
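+                # score each hotword by its cross-attention mass, summed over heads and decoding steps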
+                hotword_scores = self.seaco_decoder.forward_asf6(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
+                hotword_scores = hotword_scores[0].sum(0).sum(0)[:-1]  # [:-1] excludes the trailing placeholder entry
                 # hotword_scores /= torch.sqrt(torch.tensor(hw_lengths)[:-1].float()).to(hotword_scores.device)
                 dec_filter = torch.topk(hotword_scores, min(nfilter, num_hot_word-1))[1].tolist()
                 add_filter = dec_filter
@@ -241,9 +236,6 @@
                 contextual_info = selected.squeeze(0).repeat(encoder_out.shape[0], 1, 1).to(encoder_out.device)
                 num_hot_word = contextual_info.shape[1]
                 _contextual_length = torch.Tensor([num_hot_word]).int().repeat(encoder_out.shape[0]).to(encoder_out.device)
-                for dec in self.seaco_decoder.decoders:
-                    dec.attn_mat = []
-                    dec.reserve_attn = False
             
             # SeACo Core
             cif_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
@@ -274,6 +266,9 @@
                                 hotword_lengths):
         if self.bias_encoder_type != 'lstm':
             logging.error("Unsupported bias encoder type")
+
+        # previous implementation without sequence packing, kept for reference:
+        '''
         hw_embed = self.decoder.embed(hotword_pad)
         hw_embed, (_, _) = self.bias_encoder(hw_embed)
         if self.lstm_proj is not None:
@@ -281,26 +276,24 @@
         _ind = np.arange(0, hw_embed.shape[0]).tolist()
         selected = hw_embed[_ind, [i-1 for i in hotword_lengths.detach().cpu().tolist()]]
         return selected
+        '''
 
-    '''
-     def calc_predictor(self, encoder_out, encoder_out_lens):
-        encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
-            encoder_out.device)
-        pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index, pre_token_length2 = self.predictor(encoder_out,
-                                                                                                          None,
-                                                                                                          encoder_out_mask,
-                                                                                                          ignore_id=self.ignore_id)
-        return pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index
-
-
-    def calc_predictor_timestamp(self, encoder_out, encoder_out_lens, token_num):
-        encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
-            encoder_out.device)
-        ds_alphas, ds_cif_peak, us_alphas, us_peaks = self.predictor.get_upsample_timestamp(encoder_out,
-                                                                                            encoder_out_mask,
-                                                                                            token_num)
-        return ds_alphas, ds_cif_peak, us_alphas, us_peaks
-    '''        
+        # hw_embed = self.sac_embedding(hotword_pad)
+        hw_embed = self.decoder.embed(hotword_pad)
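+        # pack the variable-length hotword sequences so the LSTM ignores padding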
+        hw_embed = torch.nn.utils.rnn.pack_padded_sequence(
+            hw_embed, hotword_lengths.cpu().type(torch.int64), batch_first=True, enforce_sorted=False
+        )
+        packed_rnn_output, _ = self.bias_encoder(hw_embed)
+        rnn_output = torch.nn.utils.rnn.pad_packed_sequence(packed_rnn_output, batch_first=True)[0]
+        if self.lstm_proj is not None:
+            hw_hidden = self.lstm_proj(rnn_output)
+        else:
+            hw_hidden = rnn_output
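+        # take the hidden state at each hotword's last valid (unpadded) timestep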
+        _ind = np.arange(0, hw_hidden.shape[0]).tolist()
+        selected = hw_hidden[_ind, [i-1 for i in hotword_lengths.detach().cpu().tolist()]]
+        return selected
   
     def inference(self,
                  data_in,

--
Gitblit v1.9.1