From 447222c00e545906879364c641846f399290dcee Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 25 Mar 2024 12:37:35 +0800
Subject: [PATCH] install requirements automatically

---
 funasr/models/llm_asr_nar/model.py                                      |    4 
 funasr/download/download_from_hub.py                                    |    2 
 examples/industrial_data_pretraining/paraformer/export.py               |    2 
 examples/industrial_data_pretraining/ct_transformer/export.py           |    2 
 examples/industrial_data_pretraining/bicif_paraformer/export.py         |    2 
 examples/common_voice/whisper_lid/demo_funasr.py                        |    2 
 examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh         |    2 
 examples/industrial_data_pretraining/monotonic_aligner/demo.sh          |    2 
 examples/industrial_data_pretraining/scama/demo.py                      |    2 
 examples/industrial_data_pretraining/paraformer_streaming/export.sh     |    2 
 examples/industrial_data_pretraining/contextual_paraformer/demo.sh      |    2 
 examples/industrial_data_pretraining/paraformer_streaming/demo.py       |    2 
 examples/industrial_data_pretraining/paraformer/infer.sh                |    2 
 examples/industrial_data_pretraining/ct_transformer_streaming/demo.py   |    2 
 examples/industrial_data_pretraining/ct_transformer_streaming/export.py |    2 
 examples/industrial_data_pretraining/contextual_paraformer/demo.py      |    2 
 examples/industrial_data_pretraining/seaco_paraformer/demo.sh           |    6 +-
 examples/industrial_data_pretraining/conformer/demo.py                  |    2 
 examples/industrial_data_pretraining/bicif_paraformer/demo.py           |    6 +-
 examples/industrial_data_pretraining/fsmn_vad_streaming/export.sh       |    2 
 examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh          |    6 +-
 examples/industrial_data_pretraining/bicif_paraformer/export.sh         |    2 
 examples/industrial_data_pretraining/paraformer_streaming/finetune.sh   |    2 
 examples/industrial_data_pretraining/ct_transformer/demo.py             |    4 
 examples/industrial_data_pretraining/bicif_paraformer/finetune.sh       |    2 
 examples/industrial_data_pretraining/paraformer/finetune.sh             |    2 
 examples/industrial_data_pretraining/paraformer_streaming/export.py     |    2 
 examples/industrial_data_pretraining/whisper/infer.sh                   |    2 
 examples/common_voice/whisper_lid/demo_modelscope.py                    |    2 
 examples/industrial_data_pretraining/paraformer_streaming/demo.sh       |    2 
 examples/industrial_data_pretraining/emotion2vec/demo.py                |    4 
 examples/industrial_data_pretraining/paraformer/demo.py                 |    8 +-
 examples/industrial_data_pretraining/scama/demo.sh                      |    2 
 examples/industrial_data_pretraining/uniasr/demo.sh                     |    2 
 examples/industrial_data_pretraining/paraformer/export.sh               |    2 
 examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh   |    2 
 examples/industrial_data_pretraining/ct_transformer/export.sh           |    2 
 examples/industrial_data_pretraining/uniasr/demo.py                     |    4 
 funasr/models/llm_asr/model.py                                          |    2 
 examples/industrial_data_pretraining/paraformer-zh-spk/demo.py          |    6 +-
 examples/industrial_data_pretraining/contextual_paraformer/finetune.sh  |    2 
 funasr/auto/auto_model.py                                               |    6 +-
 examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py         |    2 
 examples/industrial_data_pretraining/monotonic_aligner/demo.py          |    2 
 examples/industrial_data_pretraining/fsmn_vad_streaming/export.py       |    2 
 examples/industrial_data_pretraining/bicif_paraformer/demo.sh           |    6 +-
 examples/industrial_data_pretraining/ct_transformer/demo.sh             |    4 
 examples/industrial_data_pretraining/conformer/demo.sh                  |    2 
 examples/industrial_data_pretraining/seaco_paraformer/demo.py           |    6 +-
 examples/industrial_data_pretraining/ct_transformer_streaming/export.sh |    2 
 50 files changed, 72 insertions(+), 72 deletions(-)

diff --git a/examples/common_voice/whisper_lid/demo_funasr.py b/examples/common_voice/whisper_lid/demo_funasr.py
index 9af790e..50f4e2a 100644
--- a/examples/common_voice/whisper_lid/demo_funasr.py
+++ b/examples/common_voice/whisper_lid/demo_funasr.py
@@ -12,7 +12,7 @@
     "example_ko.mp3",
 ]
 
-model = AutoModel(model="iic/speech_whisper-large_lid_multilingual_pytorch", model_revision="v2.0.4")
+model = AutoModel(model="iic/speech_whisper-large_lid_multilingual_pytorch", model_revision="master")
 for wav_id in multilingual_wavs:
     wav_file = f"{model.model_path}/examples/{wav_id}"
     res = model.generate(input=wav_file, data_type="sound", inference_clip_length=250)
diff --git a/examples/common_voice/whisper_lid/demo_modelscope.py b/examples/common_voice/whisper_lid/demo_modelscope.py
index cce389a..e55a972 100644
--- a/examples/common_voice/whisper_lid/demo_modelscope.py
+++ b/examples/common_voice/whisper_lid/demo_modelscope.py
@@ -15,7 +15,7 @@
 
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
-    model='iic/speech_whisper-large_lid_multilingual_pytorch', model_revision="v2.0.4")
+    model='iic/speech_whisper-large_lid_multilingual_pytorch', model_revision="master")
 
 for wav in multilingual_wavs:
     rec_result = inference_pipeline(input=wav, inference_clip_length=250)
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/demo.py b/examples/industrial_data_pretraining/bicif_paraformer/demo.py
index c8c4fa7..0b17065 100644
--- a/examples/industrial_data_pretraining/bicif_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/bicif_paraformer/demo.py
@@ -6,11 +6,11 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
-                  model_revision="v2.0.4",
+                  model_revision="master",
                   vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-                  vad_model_revision="v2.0.4",
+                  vad_model_revision="master",
                   punc_model="iic/punc_ct-transformer_cn-en-common-vocab471067-large",
-                  punc_model_revision="v2.0.4",
+                  punc_model_revision="master",
                   # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
                   # spk_model_revision="v2.0.2",
                   )
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/demo.sh b/examples/industrial_data_pretraining/bicif_paraformer/demo.sh
index b6c1d17..fe44734 100644
--- a/examples/industrial_data_pretraining/bicif_paraformer/demo.sh
+++ b/examples/industrial_data_pretraining/bicif_paraformer/demo.sh
@@ -1,11 +1,11 @@
 
 model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-vad_model_revision="v2.0.4"
+vad_model_revision="master"
 #punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
 punc_model="iic/punc_ct-transformer_cn-en-common-vocab471067-large"
-punc_model_revision="v2.0.4"
+punc_model_revision="master"
 spk_model="iic/speech_campplus_sv_zh-cn_16k-common"
 spk_model_revision="v2.0.2"
 
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/export.py b/examples/industrial_data_pretraining/bicif_paraformer/export.py
index 28a9e7b..8e45a23 100644
--- a/examples/industrial_data_pretraining/bicif_paraformer/export.py
+++ b/examples/industrial_data_pretraining/bicif_paraformer/export.py
@@ -8,7 +8,7 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
-                  model_revision="v2.0.4", device="cpu")
+                  model_revision="master", device="cpu")
 
 res = model.export(type="onnx", quantize=False)
 print(res)
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/export.sh b/examples/industrial_data_pretraining/bicif_paraformer/export.sh
index b6883b7..cf040ec 100644
--- a/examples/industrial_data_pretraining/bicif_paraformer/export.sh
+++ b/examples/industrial_data_pretraining/bicif_paraformer/export.sh
@@ -6,7 +6,7 @@
 
 
 model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 python -m funasr.bin.export \
 ++model=${model} \
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh b/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
index 885e5c0..9d251a1 100644
--- a/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
@@ -11,7 +11,7 @@
 
 ## option 1, download model automatically
 model_name_or_model_dir="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 ## option 2, download model by git
 #local_path_root=${workspace}/modelscope_models
diff --git a/examples/industrial_data_pretraining/conformer/demo.py b/examples/industrial_data_pretraining/conformer/demo.py
index 358a1f8..1abc7a7 100644
--- a/examples/industrial_data_pretraining/conformer/demo.py
+++ b/examples/industrial_data_pretraining/conformer/demo.py
@@ -5,7 +5,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch", model_revision="v2.0.4",
+model = AutoModel(model="iic/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch", model_revision="master",
                   )
 
 res = model.generate(input="https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav")
diff --git a/examples/industrial_data_pretraining/conformer/demo.sh b/examples/industrial_data_pretraining/conformer/demo.sh
index c259799..9cf6cc5 100644
--- a/examples/industrial_data_pretraining/conformer/demo.sh
+++ b/examples/industrial_data_pretraining/conformer/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/contextual_paraformer/demo.py b/examples/industrial_data_pretraining/contextual_paraformer/demo.py
index 4168a7d..cd93a4f 100755
--- a/examples/industrial_data_pretraining/contextual_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/contextual_paraformer/demo.py
@@ -5,7 +5,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404", model_revision="v2.0.4")
+model = AutoModel(model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404", model_revision="master")
 
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
             hotword='杈炬懇闄� 榄旀惌')
diff --git a/examples/industrial_data_pretraining/contextual_paraformer/demo.sh b/examples/industrial_data_pretraining/contextual_paraformer/demo.sh
index 987f2ff..e651338 100755
--- a/examples/industrial_data_pretraining/contextual_paraformer/demo.sh
+++ b/examples/industrial_data_pretraining/contextual_paraformer/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
-model_revision="v2.0.4"
+model_revision="master"
 
 python ../../../funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh b/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
index de88968..fe12315 100644
--- a/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
@@ -11,7 +11,7 @@
 
 ## option 1, download model automatically
 model_name_or_model_dir="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
-model_revision="v2.0.4"
+model_revision="master"
 
 ## option 2, download model by git
 #local_path_root=${workspace}/modelscope_models
diff --git a/examples/industrial_data_pretraining/ct_transformer/demo.py b/examples/industrial_data_pretraining/ct_transformer/demo.py
index 31755dc..6e6b478 100644
--- a/examples/industrial_data_pretraining/ct_transformer/demo.py
+++ b/examples/industrial_data_pretraining/ct_transformer/demo.py
@@ -5,7 +5,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch", model_revision="v2.0.4")
+model = AutoModel(model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch", model_revision="master")
 
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt")
 print(res)
@@ -13,7 +13,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/punc_ct-transformer_cn-en-common-vocab471067-large", model_revision="v2.0.4")
+model = AutoModel(model="iic/punc_ct-transformer_cn-en-common-vocab471067-large", model_revision="master")
 
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt")
 print(res)
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/ct_transformer/demo.sh b/examples/industrial_data_pretraining/ct_transformer/demo.sh
index 56d0bd3..02ee5a8 100644
--- a/examples/industrial_data_pretraining/ct_transformer/demo.sh
+++ b/examples/industrial_data_pretraining/ct_transformer/demo.sh
@@ -1,9 +1,9 @@
 
 #model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
-#model_revision="v2.0.4"
+#model_revision="master"
 
 model="iic/punc_ct-transformer_cn-en-common-vocab471067-large"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/ct_transformer/export.py b/examples/industrial_data_pretraining/ct_transformer/export.py
index 8c35670..397bb96 100644
--- a/examples/industrial_data_pretraining/ct_transformer/export.py
+++ b/examples/industrial_data_pretraining/ct_transformer/export.py
@@ -8,7 +8,7 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
-                  model_revision="v2.0.4")
+                  model_revision="master")
 
 res = model.export(type="onnx", quantize=False)
 print(res)
diff --git a/examples/industrial_data_pretraining/ct_transformer/export.sh b/examples/industrial_data_pretraining/ct_transformer/export.sh
index a11cda5..5f7e4fb 100644
--- a/examples/industrial_data_pretraining/ct_transformer/export.sh
+++ b/examples/industrial_data_pretraining/ct_transformer/export.sh
@@ -6,7 +6,7 @@
 
 
 model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 python -m funasr.bin.export \
 ++model=${model} \
diff --git a/examples/industrial_data_pretraining/ct_transformer_streaming/demo.py b/examples/industrial_data_pretraining/ct_transformer_streaming/demo.py
index 66cc5c5..14edbe4 100644
--- a/examples/industrial_data_pretraining/ct_transformer_streaming/demo.py
+++ b/examples/industrial_data_pretraining/ct_transformer_streaming/demo.py
@@ -5,7 +5,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727", model_revision="v2.0.4")
+model = AutoModel(model="iic/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727", model_revision="master")
 
 inputs = "璺ㄥ娌虫祦鏄吇鑲叉部宀竱浜烘皯鐨勭敓鍛戒箣婧愰暱鏈熶互鏉ヤ负甯姪涓嬫父鍦板尯闃茬伨鍑忕伨涓柟鎶�鏈汉鍛榺鍦ㄤ笂娓稿湴鍖烘瀬涓烘伓鍔g殑鑷劧鏉′欢涓嬪厠鏈嶅法澶у洶闅剧敋鑷冲啋鐫�鐢熷懡鍗遍櫓|鍚戝嵃鏂规彁渚涙睕鏈熸按鏂囪祫鏂欏鐞嗙揣鎬ヤ簨浠朵腑鏂归噸瑙嗗嵃鏂瑰湪璺ㄥ娌虫祦闂涓婄殑鍏冲垏|鎰挎剰杩涗竴姝ュ畬鍠勫弻鏂硅仈鍚堝伐浣滄満鍒秥鍑℃槸|涓柟鑳藉仛鐨勬垜浠瑋閮戒細鍘诲仛鑰屼笖浼氬仛寰楁洿濂芥垜璇峰嵃搴︽湅鍙嬩滑鏀惧績涓浗鍦ㄤ笂娓哥殑|浠讳綍寮�鍙戝埄鐢ㄩ兘浼氱粡杩囩瀛瑙勫垝鍜岃璇佸吋椤句笂涓嬫父鐨勫埄鐩�"
 vads = inputs.split("|")
diff --git a/examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh b/examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh
index 7124fd5..ad3e3ea 100644
--- a/examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh
+++ b/examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/ct_transformer_streaming/export.py b/examples/industrial_data_pretraining/ct_transformer_streaming/export.py
index 47fa08a..2e3b172 100644
--- a/examples/industrial_data_pretraining/ct_transformer_streaming/export.py
+++ b/examples/industrial_data_pretraining/ct_transformer_streaming/export.py
@@ -8,7 +8,7 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
-                  model_revision="v2.0.4")
+                  model_revision="master")
 
 res = model.export(type="onnx", quantize=False)
 print(res)
diff --git a/examples/industrial_data_pretraining/ct_transformer_streaming/export.sh b/examples/industrial_data_pretraining/ct_transformer_streaming/export.sh
index 118afbb..a47f701 100644
--- a/examples/industrial_data_pretraining/ct_transformer_streaming/export.sh
+++ b/examples/industrial_data_pretraining/ct_transformer_streaming/export.sh
@@ -6,7 +6,7 @@
 
 
 model="iic/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727"
-model_revision="v2.0.4"
+model_revision="master"
 
 python -m funasr.bin.export \
 ++model=${model} \
diff --git a/examples/industrial_data_pretraining/emotion2vec/demo.py b/examples/industrial_data_pretraining/emotion2vec/demo.py
index 510e6e8..50deda3 100644
--- a/examples/industrial_data_pretraining/emotion2vec/demo.py
+++ b/examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -6,9 +6,9 @@
 from funasr import AutoModel
 
 # model="iic/emotion2vec_base"
-model = AutoModel(model="iic/emotion2vec_base_finetuned", model_revision="v2.0.4",
+model = AutoModel(model="iic/emotion2vec_base_finetuned", model_revision="master",
                   # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-                  # vad_model_revision="v2.0.4",
+                  # vad_model_revision="master",
                   # vad_kwargs={"max_single_segment_time": 2000},
                   )
 
diff --git a/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py b/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py
index 03c01ea..61dce49 100644
--- a/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py
+++ b/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py
@@ -6,7 +6,7 @@
 from funasr import AutoModel
 wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"
 
-model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="v2.0.4")
+model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="master")
 
 res = model.generate(input=wav_file)
 print(res)
diff --git a/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh b/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh
index 8cdf3ed..0248dd0 100644
--- a/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh
+++ b/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh
@@ -1,7 +1,7 @@
 
 
 model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/fsmn_vad_streaming/export.py b/examples/industrial_data_pretraining/fsmn_vad_streaming/export.py
index 2c8fd4d..f45ddee 100644
--- a/examples/industrial_data_pretraining/fsmn_vad_streaming/export.py
+++ b/examples/industrial_data_pretraining/fsmn_vad_streaming/export.py
@@ -8,7 +8,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="v2.0.4")
+model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="master")
 
 res = model.export(type="onnx", quantize=False)
 print(res)
diff --git a/examples/industrial_data_pretraining/fsmn_vad_streaming/export.sh b/examples/industrial_data_pretraining/fsmn_vad_streaming/export.sh
index 1a8207a..9143dc3 100644
--- a/examples/industrial_data_pretraining/fsmn_vad_streaming/export.sh
+++ b/examples/industrial_data_pretraining/fsmn_vad_streaming/export.sh
@@ -7,7 +7,7 @@
 
 
 model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 python -m funasr.bin.export \
 ++model=${model} \
diff --git a/examples/industrial_data_pretraining/monotonic_aligner/demo.py b/examples/industrial_data_pretraining/monotonic_aligner/demo.py
index 2e76f64..467de1b 100644
--- a/examples/industrial_data_pretraining/monotonic_aligner/demo.py
+++ b/examples/industrial_data_pretraining/monotonic_aligner/demo.py
@@ -5,7 +5,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="iic/speech_timestamp_prediction-v1-16k-offline", model_revision="v2.0.4")
+model = AutoModel(model="iic/speech_timestamp_prediction-v1-16k-offline", model_revision="master")
 
 res = model.generate(input=("https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
                    "娆㈣繋澶у鏉ュ埌榄旀惌绀惧尯杩涜浣撻獙"),
diff --git a/examples/industrial_data_pretraining/monotonic_aligner/demo.sh b/examples/industrial_data_pretraining/monotonic_aligner/demo.sh
index ece97ff..649ce70 100644
--- a/examples/industrial_data_pretraining/monotonic_aligner/demo.sh
+++ b/examples/industrial_data_pretraining/monotonic_aligner/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/speech_timestamp_prediction-v1-16k-offline"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/paraformer-zh-spk/demo.py b/examples/industrial_data_pretraining/paraformer-zh-spk/demo.py
index 6ed0217..523bb3a 100644
--- a/examples/industrial_data_pretraining/paraformer-zh-spk/demo.py
+++ b/examples/industrial_data_pretraining/paraformer-zh-spk/demo.py
@@ -6,11 +6,11 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
-                  model_revision="v2.0.4",
+                  model_revision="master",
                   vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-                  vad_model_revision="v2.0.4",
+                  vad_model_revision="master",
                   punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
-                  punc_model_revision="v2.0.4",
+                  punc_model_revision="master",
                   # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
                   # spk_model_revision="v2.0.2"
                   )
diff --git a/examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh b/examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh
index dae1aca..fb0c4d9 100644
--- a/examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh
+++ b/examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh
@@ -1,10 +1,10 @@
 
 model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-vad_model_revision="v2.0.4"
+vad_model_revision="master"
 punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
-punc_model_revision="v2.0.4"
+punc_model_revision="master"
 spk_model="iic/speech_campplus_sv_zh-cn_16k-common"
 spk_model_revision="v2.0.2"
 
diff --git a/examples/industrial_data_pretraining/paraformer/demo.py b/examples/industrial_data_pretraining/paraformer/demo.py
index a39a526..6cef234 100644
--- a/examples/industrial_data_pretraining/paraformer/demo.py
+++ b/examples/industrial_data_pretraining/paraformer/demo.py
@@ -6,12 +6,12 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch", 
-                  model_revision="v2.0.4",
+                  model_revision="master",
                   vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-                  vad_model_revision="v2.0.4",
+                  vad_model_revision="master",
                   vad_kwargs={"max_single_segment_time": 60000},
                   punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
-                  punc_model_revision="v2.0.4",
+                  punc_model_revision="master",
                   # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
                   # spk_model_revision="v2.0.2",
                   )
@@ -23,7 +23,7 @@
 ''' can not use currently
 from funasr import AutoFrontend
 
-frontend = AutoFrontend(model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="v2.0.4")
+frontend = AutoFrontend(model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="master")
 
 fbanks = frontend(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", batch_size=2)
 
diff --git a/examples/industrial_data_pretraining/paraformer/export.py b/examples/industrial_data_pretraining/paraformer/export.py
index fce4d77..0c181c1 100644
--- a/examples/industrial_data_pretraining/paraformer/export.py
+++ b/examples/industrial_data_pretraining/paraformer/export.py
@@ -10,7 +10,7 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
-                  model_revision="v2.0.4")
+                  model_revision="master")
 
 res = model.export(type="onnx", quantize=False)
 print(res)
diff --git a/examples/industrial_data_pretraining/paraformer/export.sh b/examples/industrial_data_pretraining/paraformer/export.sh
index fc341e7..18d2039 100644
--- a/examples/industrial_data_pretraining/paraformer/export.sh
+++ b/examples/industrial_data_pretraining/paraformer/export.sh
@@ -6,7 +6,7 @@
 
 
 model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 
 python -m funasr.bin.export \
diff --git a/examples/industrial_data_pretraining/paraformer/finetune.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
index 3041ff3..408076b 100644
--- a/examples/industrial_data_pretraining/paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -10,7 +10,7 @@
 
 ## option 1, download model automatically
 model_name_or_model_dir="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 ## option 2, download model by git
 #local_path_root=${workspace}/modelscope_models
diff --git a/examples/industrial_data_pretraining/paraformer/infer.sh b/examples/industrial_data_pretraining/paraformer/infer.sh
index aa523e5..0b0f931 100644
--- a/examples/industrial_data_pretraining/paraformer/infer.sh
+++ b/examples/industrial_data_pretraining/paraformer/infer.sh
@@ -9,7 +9,7 @@
 output_dir="./outputs/debug"
 
 model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
 
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/demo.py b/examples/industrial_data_pretraining/paraformer_streaming/demo.py
index 9fcee3a..94e5200 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/demo.py
+++ b/examples/industrial_data_pretraining/paraformer_streaming/demo.py
@@ -10,7 +10,7 @@
 chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
 encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
-model = AutoModel(model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online", model_revision="v2.0.4")
+model = AutoModel(model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online", model_revision="master")
 
 wav_file = os.path.join(model.model_path, "example/asr_example.wav")
 res = model.generate(input=wav_file,
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/demo.sh b/examples/industrial_data_pretraining/paraformer_streaming/demo.sh
index c3f7bb4..a316aaf 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/demo.sh
+++ b/examples/industrial_data_pretraining/paraformer_streaming/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/export.py b/examples/industrial_data_pretraining/paraformer_streaming/export.py
index 8e22310..06cec31 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/export.py
+++ b/examples/industrial_data_pretraining/paraformer_streaming/export.py
@@ -10,7 +10,7 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online",
-                  model_revision="v2.0.4")
+                  model_revision="master")
 
 res = model.export(type="onnx", quantize=False)
 print(res)
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/export.sh b/examples/industrial_data_pretraining/paraformer_streaming/export.sh
index 43e344b..25ac513 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/export.sh
+++ b/examples/industrial_data_pretraining/paraformer_streaming/export.sh
@@ -6,7 +6,7 @@
 
 
 model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online"
-model_revision="v2.0.4"
+model_revision="master"
 
 
 python -m funasr.bin.export \
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
index 2bfb2bf..1e4537a 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
@@ -10,7 +10,7 @@
 
 ## option 1, download model automatically
 model_name_or_model_dir="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online"
-model_revision="v2.0.4"
+model_revision="master"
 
 ## option 2, download model by git
 #local_path_root=${workspace}/modelscope_models
diff --git a/examples/industrial_data_pretraining/scama/demo.py b/examples/industrial_data_pretraining/scama/demo.py
index 1999623..075039a 100644
--- a/examples/industrial_data_pretraining/scama/demo.py
+++ b/examples/industrial_data_pretraining/scama/demo.py
@@ -9,7 +9,7 @@
 encoder_chunk_look_back = 0 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 0 #number of encoder chunks to lookback for decoder cross-attention
 
-model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_SCAMA_asr-zh-cn-16k-common-vocab8358-streaming", model_revision="v2.0.4")
+model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_SCAMA_asr-zh-cn-16k-common-vocab8358-streaming", model_revision="master")
 cache = {}
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
             chunk_size=chunk_size,
diff --git a/examples/industrial_data_pretraining/scama/demo.sh b/examples/industrial_data_pretraining/scama/demo.sh
index c3f7bb4..a316aaf 100644
--- a/examples/industrial_data_pretraining/scama/demo.sh
+++ b/examples/industrial_data_pretraining/scama/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index 69e9020..c12b279 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -6,11 +6,11 @@
 from funasr import AutoModel
 
 model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
-                  model_revision="v2.0.4",
+                  model_revision="master",
                   # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-                  # vad_model_revision="v2.0.4",
+                  # vad_model_revision="master",
                   # punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
-                  # punc_model_revision="v2.0.4",
+                  # punc_model_revision="master",
                   # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
                   # spk_model_revision="v2.0.2",
                   )
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.sh b/examples/industrial_data_pretraining/seaco_paraformer/demo.sh
index 9a02572..65847c7 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.sh
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.sh
@@ -1,10 +1,10 @@
 
 model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-vad_model_revision="v2.0.4"
+vad_model_revision="master"
 punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
-punc_model_revision="v2.0.4"
+punc_model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/uniasr/demo.py b/examples/industrial_data_pretraining/uniasr/demo.py
index 3006d5e..0df06e2 100644
--- a/examples/industrial_data_pretraining/uniasr/demo.py
+++ b/examples/industrial_data_pretraining/uniasr/demo.py
@@ -6,7 +6,7 @@
 from funasr import AutoModel
 
 
-model = AutoModel(model="iic/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline", model_revision="v2.0.4",)
+model = AutoModel(model="iic/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline", model_revision="master",)
 
 
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav")
@@ -16,7 +16,7 @@
 ''' can not use currently
 from funasr import AutoFrontend
 
-frontend = AutoFrontend(model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="v2.0.4")
+frontend = AutoFrontend(model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="master")
 
 fbanks = frontend(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", batch_size=2)
 
diff --git a/examples/industrial_data_pretraining/uniasr/demo.sh b/examples/industrial_data_pretraining/uniasr/demo.sh
index 71f249a..e38f974 100644
--- a/examples/industrial_data_pretraining/uniasr/demo.sh
+++ b/examples/industrial_data_pretraining/uniasr/demo.sh
@@ -1,6 +1,6 @@
 
 model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
+model_revision="master"
 
 python funasr/bin/inference.py \
 +model=${model} \
diff --git a/examples/industrial_data_pretraining/whisper/infer.sh b/examples/industrial_data_pretraining/whisper/infer.sh
index 5beb7e2..6e8f247 100644
--- a/examples/industrial_data_pretraining/whisper/infer.sh
+++ b/examples/industrial_data_pretraining/whisper/infer.sh
@@ -11,7 +11,7 @@
 output_dir="./outputs/debug"
 
 model="iic/speech_whisper-large_asr_multilingual"
-model_revision="v2.0.4"
+model_revision="master"
 
 device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
 
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index c31308e..73ddc19 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -109,7 +109,7 @@
         if vad_model is not None:
             logging.info("Building VAD model.")
             vad_kwargs["model"] = vad_model
-            vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", None)
+            vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", "master")
             vad_kwargs["device"] = kwargs["device"]
             vad_model, vad_kwargs = self.build_model(**vad_kwargs)
 
@@ -119,7 +119,7 @@
         if punc_model is not None:
             logging.info("Building punc model.")
             punc_kwargs["model"] = punc_model
-            punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", None)
+            punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", "master")
             punc_kwargs["device"] = kwargs["device"]
             punc_model, punc_kwargs = self.build_model(**punc_kwargs)
 
@@ -129,7 +129,7 @@
         if spk_model is not None:
             logging.info("Building SPK model.")
             spk_kwargs["model"] = spk_model
-            spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", None)
+            spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", "master")
             spk_kwargs["device"] = kwargs["device"]
             spk_model, spk_kwargs = self.build_model(**spk_kwargs)
             self.cb_model = ClusterBackend().to(kwargs["device"])
diff --git a/funasr/download/download_from_hub.py b/funasr/download/download_from_hub.py
index 46704b0..75915f5 100644
--- a/funasr/download/download_from_hub.py
+++ b/funasr/download/download_from_hub.py
@@ -29,7 +29,7 @@
     model_or_path = kwargs.get("model")
     if model_or_path in name_maps_ms:
         model_or_path = name_maps_ms[model_or_path]
-    model_revision = kwargs.get("model_revision")
+    model_revision = kwargs.get("model_revision", "master")
     if not os.path.exists(model_or_path) and "model_path" not in kwargs:
         try:
             model_or_path = get_or_download_model_dir(model_or_path, model_revision,
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index f5dd3b1..90cbd94 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -73,7 +73,7 @@
         hub = audio_encoder_conf.get("hub", None)
         if hub == "ms":
             from funasr import AutoModel
-            model = AutoModel(model=audio_encoder, model_revision="v2.0.4")
+            model = AutoModel(model=audio_encoder, model_revision="master")
             # frontend = model.kwargs.get("frontend")
             audio_encoder_output_size = model.model.encoder_output_size
 
diff --git a/funasr/models/llm_asr_nar/model.py b/funasr/models/llm_asr_nar/model.py
index 06b2193..30537cf 100644
--- a/funasr/models/llm_asr_nar/model.py
+++ b/funasr/models/llm_asr_nar/model.py
@@ -75,7 +75,7 @@
         if hub == "funasr":
             from funasr import AutoModel
             init_param_path = encoder_conf.get("init_param_path", "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
-            model = AutoModel(model=init_param_path, model_revision="v2.0.4")
+            model = AutoModel(model=init_param_path, model_revision="master")
             # frontend = model.kwargs.get("frontend")
             model.model.decoder = None
             
@@ -406,7 +406,7 @@
             from funasr import AutoModel
             init_param_path = encoder_conf.get("init_param_path",
                                                "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
-            model = AutoModel(model=init_param_path, model_revision="v2.0.4")
+            model = AutoModel(model=init_param_path, model_revision="master")
             # frontend = model.kwargs.get("frontend")
             model.model.decoder = None
             

--
Gitblit v1.9.1