From cfc4d402093060fe087424b0a6be4e2b2546eae8 Mon Sep 17 00:00:00 2001 From: wanchen.swc <wanchen.swc@alibaba-inc.com> Date: 星期四, 30 三月 2023 18:15:15 +0800 Subject: [PATCH] [Export] support gpu inference --- funasr/runtime/python/onnxruntime/demo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/funasr/runtime/python/onnxruntime/demo.py b/funasr/runtime/python/onnxruntime/demo.py index 248d2e1..c938db4 100644 --- a/funasr/runtime/python/onnxruntime/demo.py +++ b/funasr/runtime/python/onnxruntime/demo.py @@ -7,9 +7,9 @@ # if you use paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch, you should set pred_bias=0 # plot_timestamp_to works only when using speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch -model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0) +model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0, device_id=0) wav_path = "/Users/shixian/code/funasr/export/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/example/asr_example.wav" result = model(wav_path) -print(result) \ No newline at end of file +print(result) -- Gitblit v1.9.1