From 3530688e0a1b1dfbb22dcd3324db97be5bbc0d9b Mon Sep 17 00:00:00 2001 From: takipipo <69394786+takipipo@users.noreply.github.com> Date: Thu, 16 Jan 2025 10:33:23 +0800 Subject: [PATCH] Make Emotion2vec support onnx (#2359) --- examples/industrial_data_pretraining/qwen_audio/demo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/industrial_data_pretraining/qwen_audio/demo.py b/examples/industrial_data_pretraining/qwen_audio/demo.py index 41bbb99..04a8e58 100644 --- a/examples/industrial_data_pretraining/qwen_audio/demo.py +++ b/examples/industrial_data_pretraining/qwen_audio/demo.py @@ -9,5 +9,8 @@ model = AutoModel(model="Qwen-Audio") -res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", language=None) +audio_in = "https://github.com/QwenLM/Qwen-Audio/raw/main/assets/audio/1272-128104-0000.flac" +prompt = "<|startoftranscription|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>" + +res = model.generate(input=audio_in, prompt=prompt) print(res) -- Gitblit v1.9.1