From fe8ebd746bf0c0f57ef85ed342500cbf0e2c4e9e Mon Sep 17 00:00:00 2001
From: 维石 <shixian.shi@alibaba-inc.com>
Date: Tue, 23 Jul 2024 16:59:57 +0800
Subject: [PATCH] sensevoice_bin: move inference tensors to configured device; add lid/textnorm maps

---
 runtime/python/libtorch/funasr_torch/sensevoice_bin.py |   21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
index 6808a5f..11cd2c9 100644
--- a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
+++ b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
@@ -33,14 +33,13 @@
         self,
         model_dir: Union[str, Path] = None,
         batch_size: int = 1,
-        device_id: Union[str, int] = "-1",
         plot_timestamp_to: str = "",
         quantize: bool = False,
         intra_op_num_threads: int = 4,
         cache_dir: str = None,
         **kwargs,
     ):
-
+        self.device = kwargs.get("device", "cpu")
         if not Path(model_dir).exists():
             try:
                 from modelscope.hub.snapshot_download import snapshot_download
@@ -78,6 +77,10 @@
         self.ort_infer = torch.jit.load(model_file)
         self.batch_size = batch_size
         self.blank_id = 0
+        self.lid_dict = {"auto": 0, "zh": 3, "en": 4, "yue": 7, "ja": 11, "ko": 12, "nospeech": 13}
+        self.lid_int_dict = {24884: 3, 24885: 4, 24888: 7, 24892: 11, 24896: 12, 24992: 13}
+        self.textnorm_dict = {"withitn": 14, "woitn": 15}
+        self.textnorm_int_dict = {25016: 14, 25017: 15}
 
     def __call__(self, wav_content: Union[str, np.ndarray, List[str]], **kwargs) -> List:
 
@@ -95,10 +98,10 @@
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             ctc_logits, encoder_out_lens = self.ort_infer(
-                torch.Tensor(feats),
-                torch.Tensor(feats_len),
-                torch.tensor([language]),
-                torch.tensor([textnorm]),
+                torch.Tensor(feats).to(self.device),
+                torch.Tensor(feats_len).to(self.device),
+                torch.tensor([language]).to(self.device),
+                torch.tensor([textnorm]).to(self.device),
             )
             # support batch_size=1 only currently
             x = ctc_logits[0, : encoder_out_lens[0].item(), :]
@@ -108,10 +111,8 @@
             mask = yseq != self.blank_id
             token_int = yseq[mask].tolist()
 
-            if tokenizer is not None:
-                asr_res.append(tokenizer.decode(token_int))
-            else:
-                asr_res.append(token_int)
+            asr_res.append(self.tokenizer.decode(token_int))
+
         return asr_res
 
     def load_data(self, wav_content: Union[str, np.ndarray, List[str]], fs: int = None) -> List:

--
Gitblit v1.9.1