From d550a62bead38c06ab1430ae63c30fa46243e3c3 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Thu, 29 Jun 2023 19:40:39 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main
---
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/infer.py | 33 +--------------------------------
 1 file changed, 1 insertion(+), 32 deletions(-)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/infer.py
index a0f0965..da8859e 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/infer.py
@@ -1,33 +1,3 @@
-<<<<<<< HEAD
-import os
-import shutil
-import argparse
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-def modelscope_infer(args):
- os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
- inference_pipeline = pipeline(
- task=Tasks.auto_speech_recognition,
- model=args.model,
- output_dir=args.output_dir,
- batch_size=args.batch_size,
- param_dict={"decoding_model": args.decoding_mode, "hotword": args.hotword_txt}
- )
- inference_pipeline(audio_in=args.audio_in)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument('--model', type=str, default="damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch")
- parser.add_argument('--audio_in', type=str, default="./data/test/wav.scp")
- parser.add_argument('--output_dir', type=str, default="./results/")
- parser.add_argument('--decoding_mode', type=str, default="normal")
- parser.add_argument('--hotword_txt', type=str, default=None)
- parser.add_argument('--batch_size', type=int, default=64)
- parser.add_argument('--gpuid', type=str, default="0")
- args = parser.parse_args()
- modelscope_infer(args)
-=======
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
@@ -40,5 +10,4 @@
output_dir=output_dir,
)
rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
- print(rec_result)
->>>>>>> main
+ print(rec_result)
\ No newline at end of file
--
Gitblit v1.9.1