| | |
| | | |
# --punc_model: ModelScope id of the streaming punctuation model.
# Pass an empty string on the CLI to disable punctuation restoration
# (the code below only builds the punc pipeline when this is non-empty).
parser.add_argument("--punc_model",
                    type=str,
                    default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
                    help="model from modelscope")
| | | parser.add_argument("--ngpu", |
| | | type=int, |
| | |
| | | mode='online', |
| | | ngpu=args.ngpu, |
| | | ) |
# vad: streaming VAD needs a persistent cache between chunks, plus an
# explicit end-of-stream flag (is_final) — keep the complete variant.
param_dict_vad = {'in_cache': dict(), "is_final": False}

# asr: no extra runtime parameters needed for the ASR pipeline.
param_dict_asr = {}
| | |
| | | param_dict=param_dict_asr, |
| | | ngpu=args.ngpu, |
| | | ) |
| | | |
# punc: build the punctuation pipeline only when a model is configured;
# an empty --punc_model disables it and downstream code checks for None
# before calling the pipeline.
if args.punc_model != "":
    param_dict_punc = {'cache': list()}  # streaming punc keeps context between calls
    inference_pipeline_punc = pipeline(
        task=Tasks.punctuation,
        model=args.punc_model,
        model_revision=None,
        ngpu=args.ngpu,
    )
else:
    inference_pipeline_punc = None

print("model loaded")
| | | |
| | |
| | | |
def asr():  # inference worker loop
    """Drain the `speek` audio queue forever, running streaming ASR on each chunk.

    For every non-empty chunk, run the ASR pipeline; if a punctuation
    pipeline was configured (it is None otherwise) and the ASR result
    contains text, restore punctuation before printing.
    Never returns — intended to run in its own thread.
    """
    global speek, param_dict_punc
    while True:
        while not speek.empty():
            audio_in = speek.get()
            speek.task_done()
            if len(audio_in) > 0:  # skip empty chunks — nothing to recognize
                rec_result = inference_pipeline_asr(audio_in=audio_in)
                # Guard both pipeline availability and result shape before punc.
                if inference_pipeline_punc is not None and 'text' in rec_result:
                    rec_result = inference_pipeline_punc(text_in=rec_result['text'],
                                                        param_dict=param_dict_punc)
                print(rec_result["text"] if "text" in rec_result else rec_result)
            time.sleep(0.1)  # pace consumption inside a busy burst
        time.sleep(0.1)  # idle poll interval when the queue is empty