wanchen.swc
2023-03-30 cfc4d402093060fe087424b0a6be4e2b2546eae8
[Export] support gpu inference
4 files changed, 19 lines modified:

 funasr/export/export_model.py                                 | 8
 funasr/runtime/python/libtorch/demo.py                        | 4
 funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py | 3
 funasr/runtime/python/onnxruntime/demo.py                     | 4
funasr/export/export_model.py
@@ -19,6 +19,7 @@
         self,
         cache_dir: Union[Path, str] = None,
         onnx: bool = True,
+        device: str = "cpu",
         quant: bool = True,
         fallback_num: int = 0,
         audio_in: str = None,
@@ -36,6 +37,7 @@
         )
         print("output dir: {}".format(self.cache_dir))
         self.onnx = onnx
+        self.device = device
         self.quant = quant
         self.fallback_num = fallback_num
         self.frontend = None
@@ -111,6 +113,10 @@
             dummy_input = model.get_dummy_inputs(enc_size)
         else:
             dummy_input = model.get_dummy_inputs()
+        if self.device == 'cuda':
+            model = model.cuda()
+            dummy_input = tuple([i.cuda() for i in dummy_input])
         # model_script = torch.jit.script(model)
         model_script = torch.jit.trace(model, dummy_input)
@@ -234,6 +240,7 @@
     parser.add_argument('--model-name', type=str, required=True)
     parser.add_argument('--export-dir', type=str, required=True)
     parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
+    parser.add_argument('--device', type=str, default='cpu', help='["cpu", "cuda"]')
     parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
     parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
     parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
@@ -243,6 +250,7 @@
     export_model = ModelExport(
         cache_dir=args.export_dir,
         onnx=args.type == 'onnx',
+        device=args.device,
         quant=args.quantize,
         fallback_num=args.fallback_num,
         audio_in=args.audio_in,
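
Taken together, the export_model.py hunks thread a single device switch from the CLI flag down to the tracing step. A minimal standalone sketch of the same pattern, assuming a torch.nn.Module and a tuple of dummy input tensors (export_torchscript is a hypothetical helper, not FunASR API):

import torch

def export_torchscript(model, dummy_input, out_path, device="cpu"):
    # Move both the model and its example inputs onto the GPU before
    # tracing, so torch.jit.trace records the graph over CUDA tensors.
    if device == "cuda":
        model = model.cuda()
        dummy_input = tuple(t.cuda() for t in dummy_input)
    model_script = torch.jit.trace(model, dummy_input)
    model_script.save(out_path)

Given the argparse flags above, a GPU export would then be invoked along the lines of:
python funasr/export/export_model.py --model-name <name> --export-dir <dir> --type torch --device cuda
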
funasr/runtime/python/libtorch/demo.py
@@ -2,9 +2,9 @@
 from funasr_torch import Paraformer
 model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model = Paraformer(model_dir, batch_size=1)
+model = Paraformer(model_dir, batch_size=1, device_id=0)
 wav_path = ['/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav']
 result = model(wav_path)
 print(result)
funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
@@ -58,6 +58,9 @@
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             try:
+                if int(self.device_id) != -1:
+                    feats = feats.cuda()
+                    feats_len = feats_len.cuda()
                 outputs = self.ort_infer(feats, feats_len)
                 am_scores, valid_token_lens = outputs[0], outputs[1]
                 if len(outputs) == 4:
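
The runtime side follows a simple convention: device_id == -1 means CPU, anything else selects CUDA. A self-contained sketch of the gating logic added here (infer_batch is a hypothetical name; the real loop also slices batches and unpacks am_scores/valid_token_lens as shown above):

import torch

def infer_batch(model, feats, feats_len, device_id=-1):
    # device_id == -1 keeps the inputs on CPU; any other value moves them
    # to the GPU, where the traced model is assumed to already reside.
    if int(device_id) != -1:
        feats = feats.cuda()
        feats_len = feats_len.cuda()
    return model(feats, feats_len)
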
funasr/runtime/python/onnxruntime/demo.py
@@ -7,9 +7,9 @@
 # if you use paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch, you should set pred_bias=0
 # plot_timestamp_to works only when using speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0)
+model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0, device_id=0)
 wav_path = "/Users/shixian/code/funasr/export/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/example/asr_example.wav"
 result = model(wav_path)
 print(result)
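
For the onnxruntime demo, device_id presumably selects an execution provider rather than calling .cuda(); that wiring is not part of this diff. A sketch of how a wrapper can honor the same convention, assuming make_session is a hypothetical helper:

import onnxruntime as ort

def make_session(model_path, device_id=-1):
    # device_id == -1 -> CPU only; otherwise prefer CUDA on that GPU,
    # with CPU as fallback for any unsupported operator.
    if device_id != -1:
        providers = [("CUDAExecutionProvider", {"device_id": device_id}),
                     "CPUExecutionProvider"]
    else:
        providers = ["CPUExecutionProvider"]
    return ort.InferenceSession(model_path, providers=providers)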