From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] 提交
---
examples/industrial_data_pretraining/lcbnet/demo.py | 32 ++++++++------------------------
 1 file changed, 8 insertions(+), 24 deletions(-)
diff --git a/examples/industrial_data_pretraining/lcbnet/demo.py b/examples/industrial_data_pretraining/lcbnet/demo.py
index d0870bc..39e11ee 100755
--- a/examples/industrial_data_pretraining/lcbnet/demo.py
+++ b/examples/industrial_data_pretraining/lcbnet/demo.py
@@ -5,31 +5,15 @@
from funasr import AutoModel
-model = AutoModel(model="iic/LCB-NET",
- model_revision="v1.0.0")
+model = AutoModel(model="iic/LCB-NET", model_revision="v1.0.0")
-# example1
-res = model.generate(input='["~/.cache/modelscope/hub/iic/LCB-NET/example/asr_example.wav","~/.cache/modelscope/hub/iic/LCB-NET/example/ocr.txt"]',data_type='["sound", "text"]')
+res = model.generate(
+ input=(
+ "https://www.modelscope.cn/api/v1/models/iic/LCB-NET/repo?Revision=master&FilePath=example/asr_example.wav",
+ "https://www.modelscope.cn/api/v1/models/iic/LCB-NET/repo?Revision=master&FilePath=example/ocr.txt",
+ ),
+ data_type=("sound", "text"),
+)
print(res)
-
-
-'''
-# tensor or numpy as input
-# example2
-import torchaudio
-import os
-wav_file = os.path.join(model.model_path, "example/asr_example.wav")
-input_tensor, sample_rate = torchaudio.load(wav_file)
-input_tensor = input_tensor.mean(0)
-res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
-
-
-# example3
-import soundfile
-
-wav_file = os.path.join(model.model_path, "example/asr_example.wav")
-speech, sample_rate = soundfile.read(wav_file)
-res = model.generate(input=[speech], batch_size_s=300, is_final=True)
-'''
--
Gitblit v1.9.1