From aa3fe1a353bde71d106755d030d9e5300fbde328 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 22 Jul 2024 19:02:15 +0800
Subject: [PATCH] python runtime
---
examples/industrial_data_pretraining/sense_voice/demo.py | 87 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 73 insertions(+), 14 deletions(-)
diff --git a/examples/industrial_data_pretraining/sense_voice/demo.py b/examples/industrial_data_pretraining/sense_voice/demo.py
index 0d8ef97..b8a10a8 100644
--- a/examples/industrial_data_pretraining/sense_voice/demo.py
+++ b/examples/industrial_data_pretraining/sense_voice/demo.py
@@ -1,24 +1,83 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
-# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# Copyright FunASR (https://github.com/FunAudioLLM/SenseVoice). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
from funasr import AutoModel
+from funasr.utils.postprocess_utils import rich_transcription_postprocess
-model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/SenseVoiceModelscope",
- vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
- vad_kwargs={"max_single_segment_time": 30000},
- )
+model_dir = "iic/SenseVoiceSmall"
-input_wav = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+model = AutoModel(
+ model=model_dir,
+ vad_model="fsmn-vad",
+ vad_kwargs={"max_single_segment_time": 30000},
+ device="cuda:0",
+)
-DecodingOptions = {
- "task": ("ASR", "AED", "SER"),
- "language": "auto",
- "fp16": True,
- "gain_event": True,
- }
+# en
+res = model.generate(
+ input=f"{model.model_path}/example/en.mp3",
+ cache={},
+    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
+ use_itn=True,
+ batch_size_s=60,
+ merge_vad=True, #
+ merge_length_s=15,
+)
+text = rich_transcription_postprocess(res[0]["text"])
+print(text)
-res = model.generate(input=input_wav, batch_size_s=0, DecodingOptions=DecodingOptions)
-print(res)
+# zh
+res = model.generate(
+ input=f"{model.model_path}/example/zh.mp3",
+ cache={},
+    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
+ use_itn=True,
+ batch_size_s=60,
+ merge_vad=True, #
+ merge_length_s=15,
+)
+text = rich_transcription_postprocess(res[0]["text"])
+print(text)
+
+# yue
+res = model.generate(
+ input=f"{model.model_path}/example/yue.mp3",
+ cache={},
+    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
+ use_itn=True,
+ batch_size_s=60,
+ merge_vad=True, #
+ merge_length_s=15,
+)
+text = rich_transcription_postprocess(res[0]["text"])
+print(text)
+
+# ja
+res = model.generate(
+ input=f"{model.model_path}/example/ja.mp3",
+ cache={},
+    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
+ use_itn=True,
+ batch_size_s=60,
+ merge_vad=True, #
+ merge_length_s=15,
+)
+text = rich_transcription_postprocess(res[0]["text"])
+print(text)
+
+
+# ko
+res = model.generate(
+ input=f"{model.model_path}/example/ko.mp3",
+ cache={},
+    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
+ use_itn=True,
+ batch_size_s=60,
+ merge_vad=True, #
+ merge_length_s=15,
+)
+text = rich_transcription_postprocess(res[0]["text"])
+print(text)
--
Gitblit v1.9.1