From 24f73665e2d8ea8e4de2fe4f900bc539d7f7b989 Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Mon, 17 Apr 2023 15:49:45 +0800
Subject: [PATCH] Merge pull request #367 from alibaba-damo-academy/dev_lhn2

---
 funasr/runtime/python/libtorch/demo.py |    8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/funasr/runtime/python/libtorch/demo.py b/funasr/runtime/python/libtorch/demo.py
index 0b2846a..1a9d9e9 100644
--- a/funasr/runtime/python/libtorch/demo.py
+++ b/funasr/runtime/python/libtorch/demo.py
@@ -1,11 +1,15 @@
-
 from funasr_torch import Paraformer
 
+
 model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+
 model = Paraformer(model_dir, batch_size=1)  # cpu
 # model = Paraformer(model_dir, batch_size=1, device_id=0)  # gpu
 
-wav_path = ['/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav']
+# when using paraformer-large-vad-punc model, you can set plot_timestamp_to="./xx.png" to get figure of alignment besides timestamps
+# model = Paraformer(model_dir, batch_size=1, plot_timestamp_to="test.png")
+
+wav_path = "YourPath/xx.wav"
 
 result = model(wav_path)
 print(result)

--
Gitblit v1.9.1