From a33bc50e786270beecbd290028a498f61954889d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 22 Jul 2024 21:13:17 +0800
Subject: [PATCH] python runtime
---
examples/industrial_data_pretraining/sense_voice/demo_libtorch.py | 18 ++++++++++++++++++
examples/industrial_data_pretraining/sense_voice/demo_onnx.py | 19 +++++++++++++++++++
2 files changed, 37 insertions(+), 0 deletions(-)
diff --git a/examples/industrial_data_pretraining/sense_voice/demo_libtorch.py b/examples/industrial_data_pretraining/sense_voice/demo_libtorch.py
new file mode 100644
index 0000000..530fdfa
--- /dev/null
+++ b/examples/industrial_data_pretraining/sense_voice/demo_libtorch.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/FunAudioLLM/SenseVoice). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+from pathlib import Path
+from funasr_torch import SenseVoiceSmall
+from funasr_torch.utils.postprocess_utils import rich_transcription_postprocess
+
+
+model_dir = "iic/SenseVoiceSmall"
+
+model = SenseVoiceSmall(model_dir, batch_size=10, device="cuda:0")
+
+wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format(Path.home(), model_dir)]
+
+res = model(wav_or_scp, language="auto", use_itn=True)
+print([rich_transcription_postprocess(i) for i in res])
diff --git a/examples/industrial_data_pretraining/sense_voice/demo_onnx.py b/examples/industrial_data_pretraining/sense_voice/demo_onnx.py
new file mode 100644
index 0000000..f120728
--- /dev/null
+++ b/examples/industrial_data_pretraining/sense_voice/demo_onnx.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/FunAudioLLM/SenseVoice). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+from pathlib import Path
+from funasr_onnx import SenseVoiceSmall
+from funasr_onnx.utils.postprocess_utils import rich_transcription_postprocess
+
+
+model_dir = "iic/SenseVoiceSmall"
+
+model = SenseVoiceSmall(model_dir, batch_size=10, quantize=True)
+
+# inference
+wav_or_scp = ["{}/.cache/modelscope/hub/{}/example/en.mp3".format(Path.home(), model_dir)]
+
+res = model(wav_or_scp, language="auto", use_itn=True)
+print([rich_transcription_postprocess(i) for i in res])
--
Gitblit v1.9.1