From f57b68121a526baea43b2e93f4540d8a2995f633 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期一, 29 四月 2024 15:15:24 +0800
Subject: [PATCH] batch

---
 examples/industrial_data_pretraining/seaco_paraformer/demo.py |   34 +++++++++++++++++++---------------
 1 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index e9e226d..a88e880 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -5,26 +5,30 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
-                  model_revision="v2.0.4",
-                  vad_model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-                  vad_model_revision="v2.0.4",
-                  punc_model="damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
-                  punc_model_revision="v2.0.4",
-                  # spk_model="damo/speech_campplus_sv_zh-cn_16k-common",
-                  # spk_model_revision="v2.0.2",
-                  )
+model = AutoModel(
+    model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+    # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+    # punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
+    # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
+)
 
 
 # example1
-res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
-                     hotword='达摩院 魔搭',
-                     # sentence_timestamp=True,  # return sentence level information when spk_model is not given
-                    )
+res = model.generate(
+    input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
+    hotword="达摩院 魔搭",
+    # return_raw_text=True,     # return raw text recognition results, split by spaces, equal in length to the timestamps
+    # preset_spk_num=2,         # preset speaker num for speaker cluster model
+    # sentence_timestamp=True,  # return sentence level information when spk_model is not given
+)
 print(res)
 
+
+"""
+# tensor or numpy as input
 # example2
 import torchaudio
+import os
 wav_file = os.path.join(model.model_path, "example/asr_example.wav")
 input_tensor, sample_rate = torchaudio.load(wav_file)
 input_tensor = input_tensor.mean(0)
@@ -33,8 +37,8 @@
 
 # example3
 import soundfile
-import os
+
 wav_file = os.path.join(model.model_path, "example/asr_example.wav")
 speech, sample_rate = soundfile.read(wav_file)
 res = model.generate(input=[speech], batch_size_s=300, is_final=True)
-
+"""

--
Gitblit v1.9.1