From be375fe5d8e7e6e7390ebe374bfaddee9c5abe90 Mon Sep 17 00:00:00 2001 From: zhifu gao <zhifu.gzf@alibaba-inc.com> Date: Thu, 30 Mar 2023 18:53:59 +0800 Subject: [PATCH] Merge branch 'main' into feat/cuda --- funasr/runtime/python/libtorch/demo.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/funasr/runtime/python/libtorch/demo.py b/funasr/runtime/python/libtorch/demo.py index 0b2846a..1a9d9e9 100644 --- a/funasr/runtime/python/libtorch/demo.py +++ b/funasr/runtime/python/libtorch/demo.py @@ -1,11 +1,15 @@ - from funasr_torch import Paraformer + model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" + model = Paraformer(model_dir, batch_size=1) # cpu # model = Paraformer(model_dir, batch_size=1, device_id=0) # gpu -wav_path = ['/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav'] +# when using paraformer-large-vad-punc model, you can set plot_timestamp_to="./xx.png" to get figure of alignment besides timestamps +# model = Paraformer(model_dir, batch_size=1, plot_timestamp_to="test.png") +
wav_path = "YourPath/xx.wav" result = model(wav_path) print(result) -- Gitblit v1.9.1