From 48a894c8e3babed74c2f9ab8832fe6cefe3967aa Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 16 May 2024 14:50:27 +0800
Subject: [PATCH] Merge branch 'dev_gzf_deepspeed' of github.com:alibaba-damo-academy/FunASR into dev_gzf_deepspeed merge

---
 funasr/models/sense_voice/decoder.py |   28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
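
Note: the first hunk adds the ESPnet-style scorer hooks (init_state, score,
final_score) that a beam-search driver calls once per utterance, once per
decoding step, and once at <eos>, respectively; ys_mask is computed inside
score but never passed to self.forward. The sketch below shows the expected
calling pattern. It is illustrative only: decoder, enc_out, sos_id, and
eos_id are assumed names, and the greedy loop stands in for FunASR's actual
beam search.

    import torch

    def greedy_decode(decoder, enc_out, sos_id, eos_id, max_len=50):
        """Illustrative driver for the scorer hooks added in hunk 1."""
        state = decoder.init_state(enc_out)      # per-utterance scorer state
        ys = torch.tensor([sos_id], dtype=torch.long, device=enc_out.device)
        total = 0.0
        for _ in range(max_len):
            # score() returns (vocab,) scores for the next token
            # (log-probabilities, per the scorer contract)
            logp, state = decoder.score(ys, state, enc_out)
            token = int(logp.argmax())
            total += float(logp[token])
            ys = torch.cat([ys, ys.new_tensor([token])])
            if token == eos_id:
                total += decoder.final_score(state)  # optional <eos> bonus
                break
        return ys, total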

diff --git a/funasr/models/sense_voice/decoder.py b/funasr/models/sense_voice/decoder.py
index 8a4a2ce..03b7532 100644
--- a/funasr/models/sense_voice/decoder.py
+++ b/funasr/models/sense_voice/decoder.py
@@ -337,6 +337,29 @@
 
         return x
 
+    def init_state(self, x):
+        state = {}
+
+        return state
+
+    def final_score(self, state) -> float:
+        """Score eos (optional).
+
+        Args:
+            state: Scorer state for prefix tokens
+
+        Returns:
+            float: final score
+
+        """
+        return 0.0
+
+    def score(self, ys, state, x):
+        """Score."""
+        ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
+        logp = self.forward(ys.unsqueeze(0), x.unsqueeze(0), cache=state)
+        return logp.squeeze(0)[-1, :], state
+
 
 class MultiHeadedAttentionSANMDecoder(nn.Module):
     """Multi-Head Attention layer.
@@ -449,7 +472,7 @@
         is_pad_mask = kwargs.get("is_pad_mask", False)
         is_pad_memory_mask = kwargs.get("is_pad_memory_mask", False)
 
-        fsmn_cache = cache[layer]["fsmn_cache"] if len(cache) > 0 else None
+        fsmn_cache = cache[layer]["fsmn_cache"] if cache is not None and len(cache) > 0 else None
         # if fsmn_cache is not None:
         #     x = x[:, -1:]
         att_res, fsmn_cache = self.attn(self.attn_ln(x), mask=None, cache=fsmn_cache)
@@ -576,5 +599,6 @@
     def score(self, ys, state, x):
         """Score."""
         ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
-        logp = self.forward(ys.unsqueeze(0), x.unsqueeze(0), cache=state)
+        logp = self.forward(ys.unsqueeze(0), x.unsqueeze(0), cache=None)
+        logp = torch.log_softmax(logp, dim=-1)
         return logp.squeeze(0)[-1, :], state

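The second hunk hardens the FSMN cache lookup: with the old test, scoring
without an incremental cache (cache=None) raised TypeError on len(cache).
The guard in isolation, over a hypothetical single-layer cache layout:

    # Both "no cache" (None) and "empty cache" ({}) now fall through to None.
    layer = 0
    for cache in (None, {}, {0: {"fsmn_cache": "dummy"}}):
        fsmn_cache = cache[layer]["fsmn_cache"] if cache is not None and len(cache) > 0 else None
        print(fsmn_cache)  # None, None, then 'dummy'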
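The third hunk normalizes the decoder output with log_softmax before score
returns it, and passes cache=None to forward instead of threading state
through. Beam search sums per-step scores along a hypothesis, which is only
meaningful for log-probabilities, not raw logits. A small self-contained
check of the normalization, assuming a (batch, length, vocab) output shape:

    import torch

    logits = torch.randn(1, 4, 10)            # assumed (batch, length, vocab) raw scores
    logp = torch.log_softmax(logits, dim=-1)  # per-position log-probabilities
    # Each vocabulary distribution sums to 1 in probability space, so
    # per-step scores can be added up along a beam-search hypothesis.
    assert torch.allclose(logp.exp().sum(dim=-1), torch.ones(1, 4))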
--
Gitblit v1.9.1