From fe8ebd746bf0c0f57ef85ed342500cbf0e2c4e9e Mon Sep 17 00:00:00 2001
From: 维石 <shixian.shi@alibaba-inc.com>
Date: 星期二, 23 七月 2024 16:59:57 +0800
Subject: [PATCH] sensevoice_bin: add device selection and simplify token decoding

---
 runtime/python/libtorch/funasr_torch/sensevoice_bin.py |   17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
index 19ed37d..11cd2c9 100644
--- a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
+++ b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
@@ -33,14 +33,13 @@
         self,
         model_dir: Union[str, Path] = None,
         batch_size: int = 1,
-        device_id: Union[str, int] = "-1",
         plot_timestamp_to: str = "",
         quantize: bool = False,
         intra_op_num_threads: int = 4,
         cache_dir: str = None,
         **kwargs,
     ):
-
+        self.device = kwargs.get("device", "cpu")
         if not Path(model_dir).exists():
             try:
                 from modelscope.hub.snapshot_download import snapshot_download
@@ -99,10 +98,10 @@
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             ctc_logits, encoder_out_lens = self.ort_infer(
-                torch.Tensor(feats),
-                torch.Tensor(feats_len),
-                torch.tensor([language]),
-                torch.tensor([textnorm]),
+                torch.Tensor(feats).to(self.device),
+                torch.Tensor(feats_len).to(self.device),
+                torch.tensor([language]).to(self.device),
+                torch.tensor([textnorm]).to(self.device),
             )
             # support batch_size=1 only currently
             x = ctc_logits[0, : encoder_out_lens[0].item(), :]
@@ -112,10 +111,8 @@
             mask = yseq != self.blank_id
             token_int = yseq[mask].tolist()
 
-            if tokenizer is not None:
-                asr_res.append(tokenizer.decode(token_int))
-            else:
-                asr_res.append(token_int)
+            asr_res.append(self.tokenizer.decode(token_int))
+
         return asr_res
 
     def load_data(self, wav_content: Union[str, np.ndarray, List[str]], fs: int = None) -> List:

--
Gitblit v1.9.1