From a4254193dec9164a78a506eb1b647f9b546f94b2 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 12 Sep 2023 10:57:47 +0800
Subject: [PATCH] punc large

---
 egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/demo.py |    3 ++-
 docs/images/wechat.png                                                                                          |    0 
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/images/wechat.png b/docs/images/wechat.png
index 99e6bb6..810cb32 100644
--- a/docs/images/wechat.png
+++ b/docs/images/wechat.png
Binary files differ
diff --git a/egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/demo.py b/egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/demo.py
index 510e5ed..a7cd74b 100644
--- a/egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/demo.py
+++ b/egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/demo.py
@@ -8,9 +8,10 @@
         task=Tasks.auto_speech_recognition,
         model='damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
         vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
+        #punc_model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
         punc_model='damo/punc_ct-transformer_cn-en-common-vocab471067-large',
         output_dir=output_dir,
     )
-    rec_result = inference_pipeline(audio_in=audio_in, batch_size_token=5000, batch_size_token_threshold_s=40)
+    rec_result = inference_pipeline(audio_in=audio_in, batch_size_token=5000, batch_size_token_threshold_s=40, max_single_segment_time=6000)
     print(rec_result)
 

--
Gitblit v1.9.1