From 3530688e0a1b1dfbb22dcd3324db97be5bbc0d9b Mon Sep 17 00:00:00 2001
From: takipipo <69394786+takipipo@users.noreply.github.com>
Date: Thu, 16 Jan 2025 10:33:23 +0800
Subject: [PATCH] Make Emotion2vec support onnx (#2359)

---
 runtime/python/libtorch/demo_contextual_paraformer.py |    9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/runtime/python/libtorch/demo_contextual_paraformer.py b/runtime/python/libtorch/demo_contextual_paraformer.py
index 9542d4d..306981c 100644
--- a/runtime/python/libtorch/demo_contextual_paraformer.py
+++ b/runtime/python/libtorch/demo_contextual_paraformer.py
@@ -1,9 +1,10 @@
+import torch
 from pathlib import Path
-from funasr_torch import Paraformer
+from funasr_torch.paraformer_bin import ContextualParaformer
 
-model_dir = "damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
-model = Paraformer(model_dir, batch_size=1)  # cpu
-# model = Paraformer(model_dir, batch_size=1, device_id=0)  # gpu
+model_dir = "iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
+device_id = 0 if torch.cuda.is_available() else -1
+model = ContextualParaformer(model_dir, batch_size=1, device_id=device_id)  # gpu
 
 wav_path = "{}/.cache/modelscope/hub/{}/example/asr_example.wav".format(Path.home(), model_dir)
 hotwords = "你的热词 魔搭"

--
Gitblit v1.9.1