From fb0da9f849a5d3bd473dcdbaf6197c6a5ff24a57 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 07 May 2024 15:53:26 +0800
Subject: [PATCH] decoding key

---
 funasr/models/sense_voice/model.py                |    7 ++++---
 funasr/models/sense_voice/decoder.py              |    2 +-
 funasr/models/sense_voice/whisper_lib/decoding.py |    4 +++-
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/funasr/models/sense_voice/decoder.py b/funasr/models/sense_voice/decoder.py
index dd00ca8..3c62072 100644
--- a/funasr/models/sense_voice/decoder.py
+++ b/funasr/models/sense_voice/decoder.py
@@ -472,7 +472,7 @@
         is_pad_mask = kwargs.get("is_pad_mask", False)
         is_pad_memory_mask = kwargs.get("is_pad_memory_mask", False)
 
-        fsmn_cache = cache[layer]["fsmn_cache"] if len(cache) > 0 or cache is None else None
+        fsmn_cache = cache[layer]["fsmn_cache"] if cache is not None and len(cache) > 0 else None
         # if fsmn_cache is not None:
         #     x = x[:, -1:]
         att_res, fsmn_cache = self.attn(self.attn_ln(x), mask=None, cache=fsmn_cache)
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index 8198706..dcf18fd 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -806,7 +806,6 @@
             if len(kwargs.get("data_type", [])) > 1:
                 audio_sample_list, text_token_int_list = audio_sample_list
                 text_token_int = text_token_int_list[0]
-                text_token_int = tokenizer.encode(text_token_int)
             else:
                 text_token_int = None
 
@@ -846,7 +845,7 @@
         )
 
         if text_token_int is not None:
-            i = 1
+            i = 0
             results = []
             ibest_writer = None
             if kwargs.get("output_dir") is not None:
@@ -855,7 +854,9 @@
                 ibest_writer = self.writer[f"1best_recog"]
 
             # 1. Forward decoder
-            ys_pad = torch.tensor(text_token_int, dtype=torch.int64).to(kwargs["device"])[None, :]
+            ys_pad = torch.tensor(sos_int + text_token_int, dtype=torch.int64).to(kwargs["device"])[
+                None, :
+            ]
             ys_pad_lens = torch.tensor([len(text_token_int)], dtype=torch.int64).to(
                 kwargs["device"]
             )[None, :]
diff --git a/funasr/models/sense_voice/whisper_lib/decoding.py b/funasr/models/sense_voice/whisper_lib/decoding.py
index 382a180..609d6a6 100644
--- a/funasr/models/sense_voice/whisper_lib/decoding.py
+++ b/funasr/models/sense_voice/whisper_lib/decoding.py
@@ -62,8 +62,10 @@
 
     else:
         x = x.to(mel.device)
+    # FIX(funasr): SenseVoice
+    # logits = model.logits(x[:, :-1], mel)[:, -1]
+    logits = model.logits(x[:, :], mel)[:, -1]
 
-    logits = model.logits(x[:, :-1], mel)[:, -1]
     # collect detected languages; suppress all non-language tokens
     mask = torch.ones(logits.shape[-1], dtype=torch.bool)
     mask[list(tokenizer.all_language_tokens)] = False

--
Gitblit v1.9.1