From 2e5cd36e0f2887caf636f692b8f04699e82ec7a8 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 30 Mar 2023 18:54:20 +0800
Subject: [PATCH] Merge pull request #317 from xiaowan0322/feat/cuda

---
 funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py |   10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py b/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
index 3c0606d..e169087 100644
--- a/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
+++ b/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
@@ -46,6 +46,7 @@
         )
         self.ort_infer = torch.jit.load(model_file)
         self.batch_size = batch_size
+        self.device_id = device_id
         self.plot_timestamp_to = plot_timestamp_to
         self.pred_bias = pred_bias
 
@@ -58,8 +59,13 @@
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             try:
-                outputs = self.ort_infer(feats, feats_len)
-                am_scores, valid_token_lens = outputs[0], outputs[1]
+                with torch.no_grad():
+                    if int(self.device_id) == -1:
+                        outputs = self.ort_infer(feats, feats_len)
+                        am_scores, valid_token_lens = outputs[0], outputs[1]
+                    else:
+                        outputs = self.ort_infer(feats.cuda(), feats_len.cuda())
+                        am_scores, valid_token_lens = outputs[0].cpu(), outputs[1].cpu()
                 if len(outputs) == 4:
                     # for BiCifParaformer Inference
                     us_alphas, us_peaks = outputs[2], outputs[3]

--
Gitblit v1.9.1