From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] 提交

---
 runtime/python/libtorch/funasr_torch/sensevoice_bin.py |   21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
index 8de1c54..d4444e7 100644
--- a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
+++ b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
@@ -158,6 +158,9 @@
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             _language_list = language_list[beg_idx:end_idx]
             _textnorm_list = textnorm_list[beg_idx:end_idx]
+            if not len(_language_list):
+                _language_list = [language_list[0]]
+                _textnorm_list = [textnorm_list[0]]
             B = feats.shape[0]
             if len(_language_list) == 1 and B != 1:
                 _language_list = _language_list * B
@@ -170,15 +173,19 @@
                 torch.tensor(_language_list).to(self.device),
                 torch.tensor(_textnorm_list).to(self.device),
             )
-            # support batch_size=1 only currently
-            x = ctc_logits[0, : encoder_out_lens[0].item(), :]
-            yseq = x.argmax(dim=-1)
-            yseq = torch.unique_consecutive(yseq, dim=-1)
+            for b in range(feats.shape[0]):
+                # back to torch.Tensor
+                if isinstance(ctc_logits, np.ndarray):
+                    ctc_logits = torch.from_numpy(ctc_logits).float()
+                # decode each sequence in the batch up to its encoder output length
+                x = ctc_logits[b, : encoder_out_lens[b].item(), :]
+                yseq = x.argmax(dim=-1)
+                yseq = torch.unique_consecutive(yseq, dim=-1)
 
-            mask = yseq != self.blank_id
-            token_int = yseq[mask].tolist()
+                mask = yseq != self.blank_id
+                token_int = yseq[mask].tolist()
 
-            asr_res.append(self.tokenizer.decode(token_int))
+                asr_res.append(self.tokenizer.decode(token_int))
 
         return asr_res
 

--
Gitblit v1.9.1