From e24dbdc496debec225414d4d2c760f5775e64f2a Mon Sep 17 00:00:00 2001
From: 天地 <tiandiweizun@gmail.com>
Date: Wed, 26 Mar 2025 13:44:41 +0800
Subject: [PATCH] 感觉应该从文件读取更合适,因为上面判断了文件存在,且可以读取,如果本身是文本的话,下面也会有逻辑进行处理 (#2452)
---
runtime/python/libtorch/demo_contextual_paraformer.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/runtime/python/libtorch/demo_contextual_paraformer.py b/runtime/python/libtorch/demo_contextual_paraformer.py
index 9542d4d..306981c 100644
--- a/runtime/python/libtorch/demo_contextual_paraformer.py
+++ b/runtime/python/libtorch/demo_contextual_paraformer.py
@@ -1,9 +1,10 @@
+import torch
from pathlib import Path
-from funasr_torch import Paraformer
+from funasr_torch.paraformer_bin import ContextualParaformer
-model_dir = "damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
-model = Paraformer(model_dir, batch_size=1) # cpu
-# model = Paraformer(model_dir, batch_size=1, device_id=0) # gpu
+model_dir = "iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
+device_id = 0 if torch.cuda.is_available() else -1
+model = ContextualParaformer(model_dir, batch_size=1, device_id=device_id) # gpu
wav_path = "{}/.cache/modelscope/hub/{}/example/asr_example.wav".format(Path.home(), model_dir)
 hotwords = "你的热词 魔搭"
--
Gitblit v1.9.1