From 2e5cd36e0f2887caf636f692b8f04699e82ec7a8 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 30 Mar 2023 18:54:20 +0800
Subject: [PATCH] Merge pull request #317 from xiaowan0322/feat/cuda
---
 funasr/export/export_model.py                                 |  8 ++++++++
 funasr/runtime/python/libtorch/demo.py                        | 15 +++++----------
 funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py | 10 ++++++++--
 funasr/runtime/python/onnxruntime/demo.py                     | 15 +++++----------
 4 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index b1161cb..d3d119c 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -19,6 +19,7 @@
self,
cache_dir: Union[Path, str] = None,
onnx: bool = True,
+ device: str = "cpu",
quant: bool = True,
fallback_num: int = 0,
audio_in: str = None,
@@ -36,6 +37,7 @@
)
print("output dir: {}".format(self.cache_dir))
self.onnx = onnx
+ self.device = device
self.quant = quant
self.fallback_num = fallback_num
self.frontend = None
@@ -111,6 +113,10 @@
dummy_input = model.get_dummy_inputs(enc_size)
else:
dummy_input = model.get_dummy_inputs()
+
+ if self.device == 'cuda':
+ model = model.cuda()
+ dummy_input = tuple([i.cuda() for i in dummy_input])
# model_script = torch.jit.script(model)
model_script = torch.jit.trace(model, dummy_input)
@@ -234,6 +240,7 @@
parser.add_argument('--model-name', type=str, required=True)
parser.add_argument('--export-dir', type=str, required=True)
parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
+ parser.add_argument('--device', type=str, default='cpu', help='["cpu", "cuda"]')
parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
@@ -243,6 +250,7 @@
export_model = ModelExport(
cache_dir=args.export_dir,
onnx=args.type == 'onnx',
+ device=args.device,
quant=args.quantize,
fallback_num=args.fallback_num,
audio_in=args.audio_in,
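
Usage note: with the new --device flag, the model and its dummy inputs are moved to the GPU before torch.jit.trace runs, so the TorchScript graph is traced on CUDA. A minimal invocation sketch, assuming export_model.py is run directly as a script (the model name is the one used in the demos below):

    # trace the TorchScript export on GPU; requires a CUDA-enabled PyTorch build
    python funasr/export/export_model.py \
        --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
        --export-dir ./export \
        --type torch \
        --device cuda
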
diff --git a/funasr/runtime/python/libtorch/demo.py b/funasr/runtime/python/libtorch/demo.py
index b81aca6..1a9d9e9 100644
--- a/funasr/runtime/python/libtorch/demo.py
+++ b/funasr/runtime/python/libtorch/demo.py
@@ -1,20 +1,15 @@
from funasr_torch import Paraformer
-#model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model = Paraformer(model_dir, batch_size=2)
-# when using paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch, you should set pred_bias=0
-# plot_timestamp_to works only when using speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch"
-# model = Paraformer(model_dir, batch_size=2, pred_bias=0)
+model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+
+model = Paraformer(model_dir, batch_size=1) # cpu
+# model = Paraformer(model_dir, batch_size=1, device_id=0) # gpu
# when using the paraformer-large-vad-punc model, you can set plot_timestamp_to="./xx.png" to get a figure of the alignment in addition to the timestamps
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-# model = Paraformer(model_dir, batch_size=1)
# model = Paraformer(model_dir, batch_size=1, plot_timestamp_to="test.png")
wav_path = "YourPath/xx.wav"
result = model(wav_path)
-print(result)
\ No newline at end of file
+print(result)
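
Usage note: per paraformer_bin.py below, device_id selects the device at runtime: -1 (the apparent default, inferred from the -1 check in that file rather than shown in this diff) keeps inference on CPU, while a non-negative id routes it to CUDA. A hedged sketch that picks the device automatically:

    import torch
    from funasr_torch import Paraformer

    model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    # use GPU 0 when available, otherwise stay on CPU (device_id=-1)
    device_id = 0 if torch.cuda.is_available() else -1
    model = Paraformer(model_dir, batch_size=1, device_id=device_id)

    print(model("YourPath/xx.wav"))
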
diff --git a/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py b/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
index 3c0606d..e169087 100644
--- a/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
+++ b/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
@@ -46,6 +46,7 @@
)
self.ort_infer = torch.jit.load(model_file)
self.batch_size = batch_size
+ self.device_id = device_id
self.plot_timestamp_to = plot_timestamp_to
self.pred_bias = pred_bias
@@ -58,8 +59,13 @@
end_idx = min(waveform_nums, beg_idx + self.batch_size)
feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
try:
- outputs = self.ort_infer(feats, feats_len)
- am_scores, valid_token_lens = outputs[0], outputs[1]
+ with torch.no_grad():
+ if int(self.device_id) == -1:
+ outputs = self.ort_infer(feats, feats_len)
+ am_scores, valid_token_lens = outputs[0], outputs[1]
+ else:
+ outputs = self.ort_infer(feats.cuda(), feats_len.cuda())
+ am_scores, valid_token_lens = outputs[0].cpu(), outputs[1].cpu()
if len(outputs) == 4:
# for BiCifParaformer Inference
us_alphas, us_peaks = outputs[2], outputs[3]
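
The change above follows a standard pattern for TorchScript inference: wrap the forward pass in torch.no_grad(), move the input tensors to the target device, and copy the outputs back to CPU before post-processing. A minimal self-contained sketch of that pattern, independent of the funasr classes (all names here are illustrative):

    import torch

    def scripted_infer(model, feats: torch.Tensor, feats_len: torch.Tensor, device_id: int = -1):
        """Run a scripted/traced model on CPU (device_id=-1) or on a CUDA device."""
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            if device_id == -1:
                return model(feats, feats_len)
            # move inputs to the selected GPU, run, then bring every output back to CPU
            outputs = model(feats.cuda(device_id), feats_len.cuda(device_id))
            return tuple(o.cpu() for o in outputs)

Unlike the hunk above, which copies only am_scores and valid_token_lens back, this sketch moves every output to CPU, which also covers the four-output BiCifParaformer case.
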
diff --git a/funasr/runtime/python/onnxruntime/demo.py b/funasr/runtime/python/onnxruntime/demo.py
index 351b702..f0f39d7 100644
--- a/funasr/runtime/python/onnxruntime/demo.py
+++ b/funasr/runtime/python/onnxruntime/demo.py
@@ -1,20 +1,15 @@
from funasr_onnx import Paraformer
-#model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model = Paraformer(model_dir, batch_size=2)
+model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-# when using paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch, you should set pred_bias=0
-# plot_timestamp_to works only when using speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch"
-# model = Paraformer(model_dir, batch_size=2, pred_bias=0)
+model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0) # cpu
+# model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0, device_id=0) # gpu
# when using the paraformer-large-vad-punc model, you can set plot_timestamp_to="./xx.png" to get a figure of the alignment in addition to the timestamps
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-# model = Paraformer(model_dir, batch_size=1)
# model = Paraformer(model_dir, batch_size=1, plot_timestamp_to="test.png")
+
wav_path = "YourPath/xx.wav"
result = model(wav_path)
-print(result)
\ No newline at end of file
+print(result)
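
The same CPU/GPU switch applies to the ONNX runtime demo: the comments above suggest that passing device_id >= 0 moves inference to a CUDA execution provider, while omitting it keeps CPU. A hedged sketch (assuming a CUDA-enabled onnxruntime build for the GPU branch):

    from funasr_onnx import Paraformer

    model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"

    use_gpu = False  # set True on a machine with onnxruntime-gpu installed
    if use_gpu:
        model = Paraformer(model_dir, batch_size=2, device_id=0)
    else:
        model = Paraformer(model_dir, batch_size=2)

    print(model("YourPath/xx.wav"))
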
--
Gitblit v1.9.1