From 3bc0efcc1442a722dd00aa30857688e1252b07be Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 22 Jul 2024 20:05:03 +0800
Subject: [PATCH] python runtime

---
 runtime/python/libtorch/funasr_torch/sensevoice_bin.py |   17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
index 6808a5f..2f66f4e 100644
--- a/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
+++ b/runtime/python/libtorch/funasr_torch/sensevoice_bin.py
@@ -33,14 +33,13 @@
         self,
         model_dir: Union[str, Path] = None,
         batch_size: int = 1,
-        device_id: Union[str, int] = "-1",
         plot_timestamp_to: str = "",
         quantize: bool = False,
         intra_op_num_threads: int = 4,
         cache_dir: str = None,
         **kwargs,
     ):
-
+        self.device = kwargs.get("device", "cpu")
         if not Path(model_dir).exists():
             try:
                 from modelscope.hub.snapshot_download import snapshot_download
@@ -78,6 +77,10 @@
         self.ort_infer = torch.jit.load(model_file)
         self.batch_size = batch_size
         self.blank_id = 0
+        self.lid_dict = {"auto": 0, "zh": 3, "en": 4, "yue": 7, "ja": 11, "ko": 12, "nospeech": 13}
+        self.lid_int_dict = {24884: 3, 24885: 4, 24888: 7, 24892: 11, 24896: 12, 24992: 13}
+        self.textnorm_dict = {"withitn": 14, "woitn": 15}
+        self.textnorm_int_dict = {25016: 14, 25017: 15}
 
     def __call__(self, wav_content: Union[str, np.ndarray, List[str]], **kwargs) -> List:
 
@@ -95,10 +98,10 @@
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             ctc_logits, encoder_out_lens = self.ort_infer(
-                torch.Tensor(feats),
-                torch.Tensor(feats_len),
-                torch.tensor([language]),
-                torch.tensor([textnorm]),
+                torch.Tensor(feats).to(self.device),
+                torch.Tensor(feats_len).to(self.device),
+                torch.tensor([language]).to(self.device),
+                torch.tensor([textnorm]).to(self.device),
             )
             # support batch_size=1 only currently
             x = ctc_logits[0, : encoder_out_lens[0].item(), :]
@@ -109,7 +112,7 @@
             token_int = yseq[mask].tolist()
 
             if tokenizer is not None:
-                asr_res.append(tokenizer.decode(token_int))
+                asr_res.append(self.tokenizer.decode(token_int))
             else:
                 asr_res.append(token_int)
         return asr_res

--
Gitblit v1.9.1