From c1d4bd297a4418ef44882079c4845cfe64ed0b21 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 27 Apr 2023 19:36:32 +0800
Subject: [PATCH] docs

---
 egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py
index c54ab8c..e0d1a4d 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py
@@ -4,10 +4,10 @@
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_he.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)

--
Gitblit v1.9.1