From 9b4e9cc8a0311e5243d69b73ed073e7ea441982e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 27 Mar 2024 16:05:29 +0800
Subject: [PATCH] train update
---
examples/industrial_data_pretraining/seaco_paraformer/demo.py | 40 ++++++++++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index bba5268..c7f78d3 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -6,17 +6,37 @@
from funasr import AutoModel
model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
- model_revision="v2.0.4",
- vad_model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
- vad_model_revision="v2.0.4",
- punc_model="damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
- punc_model_revision="v2.0.4",
- # spk_model="damo/speech_campplus_sv_zh-cn_16k-common",
- # spk_model_revision="v2.0.2",
+ # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+ # punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
+ # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
)
-res = model.generate(input="/Users/shixian/Downloads/output_16000.wav",
+
+# example1
+res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
hotword='达摩院 魔搭',
- # sentence_timestamp=True,
+ # return_raw_text=True, # return the raw recognition text, split by spaces, equal in length to the timestamps
+ # preset_spk_num=2, # preset number of speakers for the speaker clustering model
+ # sentence_timestamp=True, # return sentence-level information when spk_model is not given
)
-print(res)
\ No newline at end of file
+print(res)
+
+
+'''
+# tensor or numpy as input
+# example2
+import torchaudio
+import os
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+input_tensor, sample_rate = torchaudio.load(wav_file)
+input_tensor = input_tensor.mean(0)
+res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
+
+
+# example3
+import soundfile
+
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+speech, sample_rate = soundfile.read(wav_file)
+res = model.generate(input=[speech], batch_size_s=300, is_final=True)
+'''
--
Gitblit v1.9.1