From a2f263bd05498cf4f35d78ee0ee8755ba84d09ae Mon Sep 17 00:00:00 2001
From: 语帆 <yf352572@alibaba-inc.com>
Date: Mon, 04 Mar 2024 17:09:05 +0800
Subject: [PATCH] atsr
---
examples/industrial_data_pretraining/lcbnet/demo.py | 26 ++------------------------
 1 file changed, 2 insertions(+), 24 deletions(-)
diff --git a/examples/industrial_data_pretraining/lcbnet/demo.py b/examples/industrial_data_pretraining/lcbnet/demo.py
index d0870bc..4ca5255 100755
--- a/examples/industrial_data_pretraining/lcbnet/demo.py
+++ b/examples/industrial_data_pretraining/lcbnet/demo.py
@@ -8,28 +8,6 @@
model = AutoModel(model="iic/LCB-NET",
model_revision="v1.0.0")
+res = model.generate(input=("https://www.modelscope.cn/api/v1/models/iic/LCB-NET/repo?Revision=master&FilePath=example/asr_example.wav","https://www.modelscope.cn/api/v1/models/iic/LCB-NET/repo?Revision=master&FilePath=example/ocr.txt"),data_type=("sound", "text"))
-# example1
-res = model.generate(input='["~/.cache/modelscope/hub/iic/LCB-NET/example/asr_example.wav","~/.cache/modelscope/hub/iic/LCB-NET/example/ocr.txt"]',data_type='["sound", "text"]')
-
-print(res)
-
-
-'''
-# tensor or numpy as input
-# example2
-import torchaudio
-import os
-wav_file = os.path.join(model.model_path, "example/asr_example.wav")
-input_tensor, sample_rate = torchaudio.load(wav_file)
-input_tensor = input_tensor.mean(0)
-res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
-
-
-# example3
-import soundfile
-
-wav_file = os.path.join(model.model_path, "example/asr_example.wav")
-speech, sample_rate = soundfile.read(wav_file)
-res = model.generate(input=[speech], batch_size_s=300, is_final=True)
-'''
+print(res)
\ No newline at end of file
--
Gitblit v1.9.1