From 0ef6f0813c4fbce20c83a1da829ff2aa08df2127 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: 星期三, 15 五月 2024 19:02:00 +0800
Subject: [PATCH] update with main (#1731)

---
 funasr/version.txt                                |    2 
 runtime/onnxruntime/src/paraformer.h              |    6 
 runtime/readme_cn.md                              |    3 
 runtime/docs/SDK_advanced_guide_online.md         |    5 
 funasr/download/download_from_hub.py              |   82 +++++++++++
 runtime/onnxruntime/include/com-define.h          |    7 
 runtime/onnxruntime/src/vocab.cpp                 |   21 ++
 runtime/docs/docker_online_cpu_zh_lists           |    2 
 runtime/docs/SDK_advanced_guide_offline.md        |    5 
 runtime/onnxruntime/src/offline-stream.cpp        |   11 +
 runtime/onnxruntime/src/tokenizer.cpp             |   55 +++++++
 runtime/onnxruntime/src/ct-transformer-online.h   |    2 
 runtime/onnxruntime/src/paraformer.cpp            |   16 +-
 runtime/onnxruntime/src/vocab.h                   |    2 
 runtime/onnxruntime/CMakeLists.txt                |   12 +
 runtime/docs/SDK_advanced_guide_offline_en.md     |    5 
 runtime/python/http/server.py                     |    2 
 runtime/quick_start.md                            |    8 
 runtime/docs/SDK_advanced_guide_offline_en_zh.md  |    5 
 runtime/onnxruntime/src/ct-transformer.cpp        |    4 
 runtime/onnxruntime/src/phone-set.cpp             |   21 ++
 runtime/onnxruntime/src/ct-transformer-online.cpp |    4 
 runtime/onnxruntime/src/punc-model.cpp            |    4 
 runtime/websocket/bin/funasr-wss-server-2pass.cpp |   14 +-
 runtime/quick_start_zh.md                         |    8 
 funasr/download/runtime_sdk_download_tool.py      |   16 +-
 runtime/websocket/bin/funasr-wss-server.cpp       |   12 
 runtime/docs/docker_offline_cpu_zh_lists          |    2 
 runtime/docs/SDK_advanced_guide_offline_zh.md     |    5 
 runtime/onnxruntime/src/model.cpp                 |    8 
 README_zh.md                                      |    1 
 runtime/onnxruntime/include/model.h               |    6 
 runtime/onnxruntime/src/ct-transformer.h          |    2 
 runtime/readme.md                                 |    3 
 README.md                                         |    1 
 runtime/docs/SDK_advanced_guide_online_zh.md      |    5 
 runtime/onnxruntime/src/tokenizer.h               |    2 
 runtime/onnxruntime/src/phone-set.h               |    2 
 runtime/onnxruntime/include/punc-model.h          |    2 
 funasr/download/name_maps_from_hub.py             |    4 
 runtime/onnxruntime/src/tpass-stream.cpp          |   11 +
 funasr/models/emotion2vec/model.py                |    9 +
 funasr/models/sense_voice/whisper_lib/decoding.py |    4 
 runtime/onnxruntime/src/fsmn-vad.cpp              |    2 
 runtime/docs/docker_offline_cpu_en_lists          |    2 
 45 files changed, 318 insertions(+), 87 deletions(-)

diff --git a/README.md b/README.md
index 8b093bc..ba23f3f 100644
--- a/README.md
+++ b/README.md
@@ -28,6 +28,7 @@
 
 <a name="whats-new"></a>
 ## What's new:
+- 2024/05/15: Offline File Transcription Service 4.5, Offline File Transcription Service of English 1.6，Real-time Transcription Service 1.10 released，adapting to FunASR 1.0 model structure，([docs](runtime/readme.md))
 - 2024/03/05：Added the Qwen-Audio and Qwen-Audio-Chat large-scale audio-text multimodal models, which have topped multiple audio domain leaderboards. These models support speech dialogue, [usage](examples/industrial_data_pretraining/qwen_audio).
 - 2024/03/05：Added support for the Whisper-large-v3 model, a multitasking model that can perform multilingual speech recognition, speech translation, and language identification. It can be downloaded from the[modelscope](examples/industrial_data_pretraining/whisper/demo.py), and [openai](examples/industrial_data_pretraining/whisper/demo_from_openai.py).
 - 2024/03/05: Offline File Transcription Service 4.4, Offline File Transcription Service of English 1.5，Real-time Transcription Service 1.9 released，docker image supports ARM64 platform, update modelscope，([docs](runtime/readme.md))
diff --git a/README_zh.md b/README_zh.md
index 963469a..44f92e6 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -29,6 +29,7 @@
 
 <a name="鏈�鏂板姩鎬�"></a>
 ## 鏈�鏂板姩鎬�
+- 2024/05/15: 涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.5銆佽嫳鏂囩绾挎枃浠惰浆鍐欐湇鍔� 1.6銆佷腑鏂囧疄鏃惰闊冲惉鍐欐湇鍔� 1.10 鍙戝竷锛岄�傞厤FunASR 1.0妯″瀷缁撴瀯锛涜缁嗕俊鎭弬闃�([閮ㄧ讲鏂囨。](runtime/readme_cn.md))
 - 2024/03/05锛氭柊澧炲姞Qwen-Audio涓嶲wen-Audio-Chat闊抽鏂囨湰妯℃�佸ぇ妯″瀷锛屽湪澶氫釜闊抽棰嗗煙娴嬭瘯姒滃崟鍒锋锛屼腑鏀寔璇煶瀵硅瘽锛岃缁嗙敤娉曡 [绀轰緥](examples/industrial_data_pretraining/qwen_audio)銆�
 - 2024/03/05锛氭柊澧炲姞Whisper-large-v3妯″瀷鏀寔锛屽璇█璇煶璇嗗埆/缈昏瘧/璇璇嗗埆锛屾敮鎸佷粠 [modelscope](examples/industrial_data_pretraining/whisper/demo.py)浠撳簱涓嬭浇锛屼篃鏀寔浠� [openai](examples/industrial_data_pretraining/whisper/demo_from_openai.py)浠撳簱涓嬭浇妯″瀷銆�
 - 2024/03/05: 涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.4銆佽嫳鏂囩绾挎枃浠惰浆鍐欐湇鍔� 1.5銆佷腑鏂囧疄鏃惰闊冲惉鍐欐湇鍔� 1.9 鍙戝竷锛宒ocker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰锛涜缁嗕俊鎭弬闃�([閮ㄧ讲鏂囨。](runtime/readme_cn.md))
diff --git a/funasr/download/download_from_hub.py b/funasr/download/download_from_hub.py
index 43f5b67..075b131 100644
--- a/funasr/download/download_from_hub.py
+++ b/funasr/download/download_from_hub.py
@@ -10,7 +10,7 @@
     if hub == "ms":
         kwargs = download_from_ms(**kwargs)
     elif hub == "hf":
-        pass
+        kwargs = download_from_hf(**kwargs)
     elif hub == "openai":
         model_or_path = kwargs.get("model")
         if os.path.exists(model_or_path):
@@ -34,6 +34,67 @@
     if not os.path.exists(model_or_path) and "model_path" not in kwargs:
         try:
             model_or_path = get_or_download_model_dir(
+                model_or_path,
+                model_revision,
+                is_training=kwargs.get("is_training"),
+                check_latest=kwargs.get("check_latest", True),
+            )
+        except Exception as e:
+            print(f"Download: {model_or_path} failed!: {e}")
+
+    kwargs["model_path"] = model_or_path if "model_path" not in kwargs else kwargs["model_path"]
+
+    if os.path.exists(os.path.join(model_or_path, "configuration.json")):
+        with open(os.path.join(model_or_path, "configuration.json"), "r", encoding="utf-8") as f:
+            conf_json = json.load(f)
+
+            cfg = {}
+            if "file_path_metas" in conf_json:
+                add_file_root_path(model_or_path, conf_json["file_path_metas"], cfg)
+            cfg.update(kwargs)
+            if "config" in cfg:
+                config = OmegaConf.load(cfg["config"])
+                kwargs = OmegaConf.merge(config, cfg)
+                kwargs["model"] = config["model"]
+    elif os.path.exists(os.path.join(model_or_path, "config.yaml")) and os.path.exists(
+        os.path.join(model_or_path, "model.pt")
+    ):
+        config = OmegaConf.load(os.path.join(model_or_path, "config.yaml"))
+        kwargs = OmegaConf.merge(config, kwargs)
+        init_param = os.path.join(model_or_path, "model.pb")
+        kwargs["init_param"] = init_param
+        if os.path.exists(os.path.join(model_or_path, "tokens.txt")):
+            kwargs["tokenizer_conf"]["token_list"] = os.path.join(model_or_path, "tokens.txt")
+        if os.path.exists(os.path.join(model_or_path, "tokens.json")):
+            kwargs["tokenizer_conf"]["token_list"] = os.path.join(model_or_path, "tokens.json")
+        if os.path.exists(os.path.join(model_or_path, "seg_dict")):
+            kwargs["tokenizer_conf"]["seg_dict"] = os.path.join(model_or_path, "seg_dict")
+        if os.path.exists(os.path.join(model_or_path, "bpe.model")):
+            kwargs["tokenizer_conf"]["bpemodel"] = os.path.join(model_or_path, "bpe.model")
+        kwargs["model"] = config["model"]
+        if os.path.exists(os.path.join(model_or_path, "am.mvn")):
+            kwargs["frontend_conf"]["cmvn_file"] = os.path.join(model_or_path, "am.mvn")
+        if os.path.exists(os.path.join(model_or_path, "jieba_usr_dict")):
+            kwargs["jieba_usr_dict"] = os.path.join(model_or_path, "jieba_usr_dict")
+    if isinstance(kwargs, DictConfig):
+        kwargs = OmegaConf.to_container(kwargs, resolve=True)
+    if os.path.exists(os.path.join(model_or_path, "requirements.txt")):
+        requirements = os.path.join(model_or_path, "requirements.txt")
+        print(f"Detect model requirements, begin to install it: {requirements}")
+        from funasr.utils.install_model_requirements import install_requirements
+
+        install_requirements(requirements)
+    return kwargs
+
+
+def download_from_hf(**kwargs):
+    model_or_path = kwargs.get("model")
+    if model_or_path in name_maps_hf:
+        model_or_path = name_maps_hf[model_or_path]
+    model_revision = kwargs.get("model_revision", "master")
+    if not os.path.exists(model_or_path) and "model_path" not in kwargs:
+        try:
+            model_or_path = get_or_download_model_dir_hf(
                 model_or_path,
                 model_revision,
                 is_training=kwargs.get("is_training"),
@@ -136,3 +197,22 @@
             model, revision=model_revision, user_agent={Invoke.KEY: key, ThirdParty.KEY: "funasr"}
         )
     return model_cache_dir
+
+
+def get_or_download_model_dir_hf(
+    model,
+    model_revision=None,
+    is_training=False,
+    check_latest=True,
+):
+    """Get local model directory or download model if necessary.
+
+    Args:
+        model (str): model id or path to local model directory.
+        model_revision  (str, optional): model version number.
+        :param is_training:
+    """
+    from huggingface_hub import snapshot_download
+
+    model_cache_dir = snapshot_download(model)
+    return model_cache_dir
diff --git a/funasr/download/name_maps_from_hub.py b/funasr/download/name_maps_from_hub.py
index 87a89fc..3bb25a7 100644
--- a/funasr/download/name_maps_from_hub.py
+++ b/funasr/download/name_maps_from_hub.py
@@ -14,7 +14,9 @@
     "Qwen-Audio": "Qwen/Qwen-Audio",
 }
 
-name_maps_hf = {}
+name_maps_hf = {
+    "": "",
+}
 
 name_maps_openai = {
     "Whisper-tiny.en": "tiny.en",
diff --git a/funasr/download/runtime_sdk_download_tool.py b/funasr/download/runtime_sdk_download_tool.py
index 7776a71..96c6735 100644
--- a/funasr/download/runtime_sdk_download_tool.py
+++ b/funasr/download/runtime_sdk_download_tool.py
@@ -20,6 +20,7 @@
     args = parser.parse_args()
 
     model_dir = args.model_name
+    output_dir = args.model_name
     if not Path(args.model_name).exists():
         from modelscope.hub.snapshot_download import snapshot_download
 
@@ -27,6 +28,7 @@
             model_dir = snapshot_download(
                 args.model_name, cache_dir=args.export_dir, revision=args.model_revision
             )
+            output_dir = os.path.join(args.export_dir, args.model_name)
         except:
             raise "model_dir must be model_name in modelscope or local path downloaded from modelscope, but is {}".format(
                 model_dir
@@ -37,15 +39,13 @@
             model_file = os.path.join(model_dir, "model_quant.onnx")
         if not os.path.exists(model_file):
             print(".onnx is not exist, begin to export onnx")
-            from funasr.bin.export_model import ModelExport
+            from funasr import AutoModel
 
-            export_model = ModelExport(
-                cache_dir=args.export_dir,
-                onnx=True,
-                device="cpu",
-                quant=args.quantize,
-            )
-            export_model.export(model_dir)
+            export_model = AutoModel(model=args.model_name, output_dir=output_dir)
+            export_model.export(
+                    quantize=args.quantize,
+                    type=args.type,
+                    )
 
 
 if __name__ == "__main__":
diff --git a/funasr/models/emotion2vec/model.py b/funasr/models/emotion2vec/model.py
index 48b8716..d18e184 100644
--- a/funasr/models/emotion2vec/model.py
+++ b/funasr/models/emotion2vec/model.py
@@ -249,10 +249,17 @@
             if self.proj:
                 x = x.mean(dim=1)
                 x = self.proj(x)
+                for idx, lab in enumerate(labels):
+                    x[:,idx] = -np.inf if lab.startswith("unuse") else x[:,idx]
                 x = torch.softmax(x, dim=-1)
                 scores = x[0].tolist()
 
-            result_i = {"key": key[i], "labels": labels, "scores": scores}
+            select_label = [lb for lb in labels if not lb.startswith("unuse")]
+            select_score = [scores[idx] for idx, lb in enumerate(labels) if not lb.startswith("unuse")]
+
+            # result_i = {"key": key[i], "labels": labels, "scores": scores}
+            result_i = {"key": key[i], "labels": select_label, "scores": select_score}
+
             if extract_embedding:
                 result_i["feats"] = feats
             results.append(result_i)
diff --git a/funasr/models/sense_voice/whisper_lib/decoding.py b/funasr/models/sense_voice/whisper_lib/decoding.py
index 609d6a6..a468efa 100644
--- a/funasr/models/sense_voice/whisper_lib/decoding.py
+++ b/funasr/models/sense_voice/whisper_lib/decoding.py
@@ -63,8 +63,8 @@
     else:
         x = x.to(mel.device)
     # FIX(funasr): sense vocie
-    # logits = model.logits(x[:, :-1], mel)[:, -1]
-    logits = model.logits(x[:, :], mel)[:, -1]
+    logits = model.logits(x[:, :-1], mel)[:, -1]
+    # logits = model.logits(x[:, :], mel)[:, -1]
 
     # collect detected languages; suppress all non-language tokens
     mask = torch.ones(logits.shape[-1], dtype=torch.bool)
diff --git a/funasr/version.txt b/funasr/version.txt
index 7717884..3f11ef6 100644
--- a/funasr/version.txt
+++ b/funasr/version.txt
@@ -1 +1 @@
-1.0.26
\ No newline at end of file
+1.0.27
\ No newline at end of file
diff --git a/runtime/docs/SDK_advanced_guide_offline.md b/runtime/docs/SDK_advanced_guide_offline.md
index d975b53..799727f 100644
--- a/runtime/docs/SDK_advanced_guide_offline.md
+++ b/runtime/docs/SDK_advanced_guide_offline.md
@@ -12,6 +12,7 @@
 
 | TIME       | INFO                                                                                                                             | IMAGE VERSION                | IMAGE ID     |
 |------------|----------------------------------------------------------------------------------------------------------------------------------|------------------------------|--------------|
+| 2024.05.15 | Adapting to FunASR 1.0 model structure | funasr-runtime-sdk-cpu-0.4.5 | 058b9882ae67 |
 | 2024.03.05 | docker image supports ARM64 platform, update modelscope | funasr-runtime-sdk-cpu-0.4.4 | 2dc87b86dc49 |
 | 2024.01.25 | Optimized the VAD (Voice Activity Detection) data processing method, significantly reducing peak memory usage; memory leak optimization| funasr-runtime-sdk-cpu-0.4.2 | befdc7b179ed |
 | 2024.01.08 | optimized format sentence-level timestamps | funasr-runtime-sdk-cpu-0.4.1 | 0250f8ef981b |
@@ -34,9 +35,9 @@
 ### Pulling and launching images
 Use the following command to pull and launch the Docker image for the FunASR runtime-SDK:
 ```shell
-sudo docker pull registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+sudo docker pull registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 
-sudo docker run -p 10095:10095 -it --privileged=true -v /root:/workspace/models registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+sudo docker run -p 10095:10095 -it --privileged=true -v /root:/workspace/models registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 ```
 
 Introduction to command parameters: 
diff --git a/runtime/docs/SDK_advanced_guide_offline_en.md b/runtime/docs/SDK_advanced_guide_offline_en.md
index 220c10c..4f61416 100644
--- a/runtime/docs/SDK_advanced_guide_offline_en.md
+++ b/runtime/docs/SDK_advanced_guide_offline_en.md
@@ -6,6 +6,7 @@
 
 | TIME       | INFO                                    | IMAGE VERSION                   | IMAGE ID     |
 |------------|-----------------------------------------|---------------------------------|--------------|
+| 2024.05.15 | Adapting to FunASR 1.0 model structure | funasr-runtime-sdk-en-cpu-0.1.6 | 84d781d07997 |
 | 2024.03.05 | docker image supports ARM64 platform, update modelscope | funasr-runtime-sdk-en-cpu-0.1.5 | 7cca2abc5901 |
 | 2024.01.25 | Optimized the VAD (Voice Activity Detection) data processing method, significantly reducing peak memory usage; memory leak optimization| funasr-runtime-sdk-en-cpu-0.1.3 | c00f9ce7a195 |
 | 2024.01.03 | fixed known crash issues as well as memory leak problems | funasr-runtime-sdk-en-cpu-0.1.2 | 0cdd9f4a4bb5 |
@@ -24,9 +25,9 @@
 ### Pulling and launching images
 Use the following command to pull and launch the Docker image for the FunASR runtime-SDK:
 ```shell
-sudo docker pull registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.5
+sudo docker pull registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.6
 
-sudo docker run -p 10097:10095 -it --privileged=true -v /root:/workspace/models registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.5
+sudo docker run -p 10097:10095 -it --privileged=true -v /root:/workspace/models registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.6
 ```
 Introduction to command parameters: 
 ```text
diff --git a/runtime/docs/SDK_advanced_guide_offline_en_zh.md b/runtime/docs/SDK_advanced_guide_offline_en_zh.md
index b1ce1ee..3d65343 100644
--- a/runtime/docs/SDK_advanced_guide_offline_en_zh.md
+++ b/runtime/docs/SDK_advanced_guide_offline_en_zh.md
@@ -6,6 +6,7 @@
 
 | 鏃堕棿         | 璇︽儏            | 闀滃儚鐗堟湰                            | 闀滃儚ID         |
 |------------|---------------|---------------------------------|--------------|
+| 2024.05.15 | 适配FunASR 1.0模型结构 | funasr-runtime-sdk-en-cpu-0.1.6 | 84d781d07997 |
 | 2024.03.05 | docker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰 | funasr-runtime-sdk-en-cpu-0.1.5 | 7cca2abc5901 |
 | 2024.01.25 | 浼樺寲vad鏁版嵁澶勭悊鏂瑰紡锛屽ぇ骞呴檷浣庡嘲鍊煎唴瀛樺崰鐢紱鍐呭瓨娉勬紡浼樺寲 | funasr-runtime-sdk-en-cpu-0.1.3 | c00f9ce7a195 |
 | 2024.01.03 | 淇宸茬煡鐨刢rash闂鍙婂唴瀛樻硠婕忛棶棰� | funasr-runtime-sdk-en-cpu-0.1.2 | 0cdd9f4a4bb5 |
@@ -39,11 +40,11 @@
 閫氳繃涓嬭堪鍛戒护鎷夊彇骞跺惎鍔‵unASR runtime-SDK鐨刣ocker闀滃儚锛�
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.5
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.6
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10097:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.5
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.6
 ```
 
 ### 鏈嶅姟绔惎鍔�
diff --git a/runtime/docs/SDK_advanced_guide_offline_zh.md b/runtime/docs/SDK_advanced_guide_offline_zh.md
index ef4cfd2..1cecb88 100644
--- a/runtime/docs/SDK_advanced_guide_offline_zh.md
+++ b/runtime/docs/SDK_advanced_guide_offline_zh.md
@@ -10,6 +10,7 @@
 
 | 鏃堕棿         | 璇︽儏                                                | 闀滃儚鐗堟湰                         | 闀滃儚ID         |
 |------------|---------------------------------------------------|------------------------------|--------------|
+| 2024.05.15 | 适配FunASR 1.0模型结构 | funasr-runtime-sdk-cpu-0.4.5 | 058b9882ae67 |
 | 2024.03.05 | docker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰 | funasr-runtime-sdk-cpu-0.4.4 | 2dc87b86dc49 |
 | 2024.01.25 | 浼樺寲vad鏁版嵁澶勭悊鏂瑰紡锛屽ぇ骞呴檷浣庡嘲鍊煎唴瀛樺崰鐢紱鍐呭瓨娉勬紡浼樺寲| funasr-runtime-sdk-cpu-0.4.2 | befdc7b179ed |
 | 2024.01.08 | 浼樺寲鍙ュ瓙绾ф椂闂存埑json鏍煎紡 | funasr-runtime-sdk-cpu-0.4.1 | 0250f8ef981b |
@@ -48,11 +49,11 @@
 
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10095:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 ```
 
 ### 鏈嶅姟绔惎鍔�
diff --git a/runtime/docs/SDK_advanced_guide_online.md b/runtime/docs/SDK_advanced_guide_online.md
index 34b601c..be9e5e8 100644
--- a/runtime/docs/SDK_advanced_guide_online.md
+++ b/runtime/docs/SDK_advanced_guide_online.md
@@ -8,6 +8,7 @@
 
 | TIME       | INFO                                                                                | IMAGE VERSION                       | IMAGE ID     |
 |------------|-------------------------------------------------------------------------------------|-------------------------------------|--------------|
+| 2024.05.15 | Adapting to FunASR 1.0 model structure | funasr-runtime-sdk-online-cpu-0.1.10 | 1c2adfcff84d |
 | 2024.03.05 | docker image supports ARM64 platform, update modelscope | funasr-runtime-sdk-online-cpu-0.1.9 | 4a875e08c7a2 |
 | 2024.01.25 | Optimization of the client-side | funasr-runtime-sdk-online-cpu-0.1.7  | 2aa23805572e      |
 | 2024.01.03 | The 2pass-offline mode supports Ngram language model decoding and WFST hotwords, while also addressing known crash issues and memory leak problems | funasr-runtime-sdk-online-cpu-0.1.6  | f99925110d27      |
@@ -29,9 +30,9 @@
 ### Pull Docker Image
 Use the following command to pull and start the FunASR software package docker image:
 ```shell
-sudo docker pull registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+sudo docker pull registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 mkdir -p ./funasr-runtime-resources/models
-sudo docker run -p 10096:10095 -it --privileged=true -v $PWD/funasr-runtime-resources/models:/workspace/models registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+sudo docker run -p 10096:10095 -it --privileged=true -v $PWD/funasr-runtime-resources/models:/workspace/models registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 ```
 
 ### Launching the Server
diff --git a/runtime/docs/SDK_advanced_guide_online_zh.md b/runtime/docs/SDK_advanced_guide_online_zh.md
index 4b72e6d..26ca4bc 100644
--- a/runtime/docs/SDK_advanced_guide_online_zh.md
+++ b/runtime/docs/SDK_advanced_guide_online_zh.md
@@ -12,6 +12,7 @@
 
 | 鏃堕棿         | 璇︽儏                                | 闀滃儚鐗堟湰                                 | 闀滃儚ID         |
 |:-----------|:----------------------------------|--------------------------------------|--------------|
+| 2024.05.15 | 适配FunASR 1.0模型结构 | funasr-runtime-sdk-online-cpu-0.1.10 | 1c2adfcff84d |
 | 2024.03.05 | docker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰 | funasr-runtime-sdk-online-cpu-0.1.9 | 4a875e08c7a2 |
 | 2024.01.25 | 瀹㈡埛绔紭鍖東 funasr-runtime-sdk-online-cpu-0.1.7  | 2aa23805572e      |
 | 2024.01.03 | 2pass-offline妯″紡鏀寔Ngram璇█妯″瀷瑙g爜銆亀fst鐑瘝锛屽悓鏃朵慨澶嶅凡鐭ョ殑crash闂鍙婂唴瀛樻硠婕忛棶棰� | funasr-runtime-sdk-online-cpu-0.1.6  | f99925110d27      |
@@ -38,11 +39,11 @@
 
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10096:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 ```
 
 ### 鏈嶅姟绔惎鍔�
diff --git a/runtime/docs/docker_offline_cpu_en_lists b/runtime/docs/docker_offline_cpu_en_lists
index 52b14a8..40e5852 100644
--- a/runtime/docs/docker_offline_cpu_en_lists
+++ b/runtime/docs/docker_offline_cpu_en_lists
@@ -1,7 +1,7 @@
 DOCKER:
+  funasr-runtime-sdk-en-cpu-0.1.6
   funasr-runtime-sdk-en-cpu-0.1.5
   funasr-runtime-sdk-en-cpu-0.1.4
-  funasr-runtime-sdk-en-cpu-0.1.3
 DEFAULT_ASR_MODEL:
   damo/speech_paraformer-large_asr_nat-en-16k-common-vocab10020-onnx
 DEFAULT_VAD_MODEL:
diff --git a/runtime/docs/docker_offline_cpu_zh_lists b/runtime/docs/docker_offline_cpu_zh_lists
index ccd5d95..e3d9efc 100644
--- a/runtime/docs/docker_offline_cpu_zh_lists
+++ b/runtime/docs/docker_offline_cpu_zh_lists
@@ -1,5 +1,5 @@
 DOCKER:
-  funasr-runtime-sdk-cpu-0.4.4
+  funasr-runtime-sdk-cpu-0.4.5
   funasr-runtime-sdk-cpu-0.3.0
   funasr-runtime-sdk-cpu-0.2.2
 DEFAULT_ASR_MODEL:
diff --git a/runtime/docs/docker_online_cpu_zh_lists b/runtime/docs/docker_online_cpu_zh_lists
index c4ac16b..4cb5ca0 100644
--- a/runtime/docs/docker_online_cpu_zh_lists
+++ b/runtime/docs/docker_online_cpu_zh_lists
@@ -1,7 +1,7 @@
 DOCKER:
+  funasr-runtime-sdk-online-cpu-0.1.10
   funasr-runtime-sdk-online-cpu-0.1.9
   funasr-runtime-sdk-online-cpu-0.1.8
-  funasr-runtime-sdk-online-cpu-0.1.7
 DEFAULT_ASR_MODEL:
   damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx
   damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-onnx
diff --git a/runtime/onnxruntime/CMakeLists.txt b/runtime/onnxruntime/CMakeLists.txt
index 3450be7..d8e623e 100644
--- a/runtime/onnxruntime/CMakeLists.txt
+++ b/runtime/onnxruntime/CMakeLists.txt
@@ -18,6 +18,17 @@
     message("Little endian system")
 endif()
 
+# json
+include(FetchContent)
+if(NOT EXISTS ${PROJECT_SOURCE_DIR}/third_party/json/ChangeLog.md )
+FetchContent_Declare(json
+  URL   https://github.com/nlohmann/json/archive/refs/tags/v3.11.2.tar.gz
+SOURCE_DIR ${PROJECT_SOURCE_DIR}/third_party/json
+)
+
+FetchContent_MakeAvailable(json)
+endif()
+
 # for onnxruntime
 IF(WIN32)
     file(REMOVE ${PROJECT_SOURCE_DIR}/third_party/glog/src/config.h 
@@ -36,6 +47,7 @@
 include_directories(${PROJECT_SOURCE_DIR}/third_party/jieba/include)
 include_directories(${PROJECT_SOURCE_DIR}/third_party/jieba/include/limonp/include)
 include_directories(${PROJECT_SOURCE_DIR}/third_party/kaldi)
+include_directories(${PROJECT_SOURCE_DIR}/third_party/json/include)
 
 if(ENABLE_GLOG)
     include_directories(${PROJECT_SOURCE_DIR}/third_party/glog/src)
diff --git a/runtime/onnxruntime/include/com-define.h b/runtime/onnxruntime/include/com-define.h
index 9cb1f2c..d4edd5b 100644
--- a/runtime/onnxruntime/include/com-define.h
+++ b/runtime/onnxruntime/include/com-define.h
@@ -49,13 +49,14 @@
 // hotword embedding compile model
 #define MODEL_EB_NAME "model_eb.onnx"
 #define QUANT_MODEL_NAME "model_quant.onnx"
-#define VAD_CMVN_NAME "vad.mvn"
-#define VAD_CONFIG_NAME "vad.yaml"
+#define VAD_CMVN_NAME "am.mvn"
+#define VAD_CONFIG_NAME "config.yaml"
 #define AM_CMVN_NAME "am.mvn"
 #define AM_CONFIG_NAME "config.yaml"
 #define LM_CONFIG_NAME "config.yaml"
-#define PUNC_CONFIG_NAME "punc.yaml"
+#define PUNC_CONFIG_NAME "config.yaml"
 #define MODEL_SEG_DICT "seg_dict"
+#define TOKEN_PATH "tokens.json"
 #define HOTWORD "hotword"
 // #define NN_HOTWORD "nn-hotword"
 
diff --git a/runtime/onnxruntime/include/model.h b/runtime/onnxruntime/include/model.h
index 33caec8..f5c4027 100644
--- a/runtime/onnxruntime/include/model.h
+++ b/runtime/onnxruntime/include/model.h
@@ -12,9 +12,9 @@
     virtual void StartUtterance() = 0;
     virtual void EndUtterance() = 0;
     virtual void Reset() = 0;
-    virtual void InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){};
-    virtual void InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){};
-    virtual void InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){};
+    virtual void InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num){};
+    virtual void InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num){};
+    virtual void InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num){};
     virtual void InitLm(const std::string &lm_file, const std::string &lm_config, const std::string &lex_file){};
     virtual void InitFstDecoder(){};
     virtual std::string Forward(float *din, int len, bool input_finished, const std::vector<std::vector<float>> &hw_emb={{0.0}}, void* wfst_decoder=nullptr){return "";};
diff --git a/runtime/onnxruntime/include/punc-model.h b/runtime/onnxruntime/include/punc-model.h
index 214c770..3cec2c1 100644
--- a/runtime/onnxruntime/include/punc-model.h
+++ b/runtime/onnxruntime/include/punc-model.h
@@ -11,7 +11,7 @@
 class PuncModel {
   public:
     virtual ~PuncModel(){};
-	  virtual void InitPunc(const std::string &punc_model, const std::string &punc_config, int thread_num)=0;
+	  virtual void InitPunc(const std::string &punc_model, const std::string &punc_config, const std::string &token_file, int thread_num)=0;
 	  virtual std::string AddPunc(const char* sz_input, std::string language="zh-cn"){return "";};
 	  virtual std::string AddPunc(const char* sz_input, std::vector<std::string>& arr_cache, std::string language="zh-cn"){return "";};
 };
diff --git a/runtime/onnxruntime/src/ct-transformer-online.cpp b/runtime/onnxruntime/src/ct-transformer-online.cpp
index 4e9136e..92fe41e 100644
--- a/runtime/onnxruntime/src/ct-transformer-online.cpp
+++ b/runtime/onnxruntime/src/ct-transformer-online.cpp
@@ -11,7 +11,7 @@
 {
 }
 
-void CTTransformerOnline::InitPunc(const std::string &punc_model, const std::string &punc_config, int thread_num){
+void CTTransformerOnline::InitPunc(const std::string &punc_model, const std::string &punc_config, const std::string &token_file, int thread_num){
     session_options.SetIntraOpNumThreads(thread_num);
     session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
     session_options.DisableCpuMemArena();
@@ -43,7 +43,7 @@
     for (auto& item : m_strOutputNames)
         m_szOutputNames.push_back(item.c_str());
 
-	m_tokenizer.OpenYaml(punc_config.c_str());
+	m_tokenizer.OpenYaml(punc_config.c_str(), token_file.c_str());
 }
 
 CTTransformerOnline::~CTTransformerOnline()
diff --git a/runtime/onnxruntime/src/ct-transformer-online.h b/runtime/onnxruntime/src/ct-transformer-online.h
index ea7edb7..13f40a0 100644
--- a/runtime/onnxruntime/src/ct-transformer-online.h
+++ b/runtime/onnxruntime/src/ct-transformer-online.h
@@ -26,7 +26,7 @@
 public:
 
 	CTTransformerOnline();
-	void InitPunc(const std::string &punc_model, const std::string &punc_config, int thread_num);
+	void InitPunc(const std::string &punc_model, const std::string &punc_config, const std::string &token_file, int thread_num);
 	~CTTransformerOnline();
 	vector<int>  Infer(vector<int32_t> input_data, int nCacheSize);
 	string AddPunc(const char* sz_input, vector<string> &arr_cache, std::string language="zh-cn");
diff --git a/runtime/onnxruntime/src/ct-transformer.cpp b/runtime/onnxruntime/src/ct-transformer.cpp
index 8f8d953..d1a7813 100644
--- a/runtime/onnxruntime/src/ct-transformer.cpp
+++ b/runtime/onnxruntime/src/ct-transformer.cpp
@@ -11,7 +11,7 @@
 {
 }
 
-void CTTransformer::InitPunc(const std::string &punc_model, const std::string &punc_config, int thread_num){
+void CTTransformer::InitPunc(const std::string &punc_model, const std::string &punc_config, const std::string &token_file, int thread_num){
     session_options.SetIntraOpNumThreads(thread_num);
     session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
     session_options.DisableCpuMemArena();
@@ -39,7 +39,7 @@
     for (auto& item : m_strOutputNames)
         m_szOutputNames.push_back(item.c_str());
 
-	m_tokenizer.OpenYaml(punc_config.c_str());
+	m_tokenizer.OpenYaml(punc_config.c_str(), token_file.c_str());
     m_tokenizer.JiebaInit(punc_config);
 }
 
diff --git a/runtime/onnxruntime/src/ct-transformer.h b/runtime/onnxruntime/src/ct-transformer.h
index b33dcf5..f38fe12 100644
--- a/runtime/onnxruntime/src/ct-transformer.h
+++ b/runtime/onnxruntime/src/ct-transformer.h
@@ -26,7 +26,7 @@
 public:
 
 	CTTransformer();
-	void InitPunc(const std::string &punc_model, const std::string &punc_config, int thread_num);
+	void InitPunc(const std::string &punc_model, const std::string &punc_config, const std::string &token_file, int thread_num);
 	~CTTransformer();
 	vector<int>  Infer(vector<int32_t> input_data);
 	string AddPunc(const char* sz_input, std::string language="zh-cn");
diff --git a/runtime/onnxruntime/src/fsmn-vad.cpp b/runtime/onnxruntime/src/fsmn-vad.cpp
index c832274..42ce83b 100644
--- a/runtime/onnxruntime/src/fsmn-vad.cpp
+++ b/runtime/onnxruntime/src/fsmn-vad.cpp
@@ -30,7 +30,7 @@
 
     try{
         YAML::Node frontend_conf = config["frontend_conf"];
-        YAML::Node post_conf = config["vad_post_conf"];
+        YAML::Node post_conf = config["model_conf"];
 
         this->vad_sample_rate_ = frontend_conf["fs"].as<int>();
         this->vad_silence_duration_ =  post_conf["max_end_silence_time"].as<int>();
diff --git a/runtime/onnxruntime/src/model.cpp b/runtime/onnxruntime/src/model.cpp
index 646f260..8b5e33f 100644
--- a/runtime/onnxruntime/src/model.cpp
+++ b/runtime/onnxruntime/src/model.cpp
@@ -8,6 +8,7 @@
         string am_model_path;
         string am_cmvn_path;
         string am_config_path;
+        string token_path;
 
         am_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_NAME);
         if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
@@ -15,10 +16,11 @@
         }
         am_cmvn_path = PathAppend(model_path.at(MODEL_DIR), AM_CMVN_NAME);
         am_config_path = PathAppend(model_path.at(MODEL_DIR), AM_CONFIG_NAME);
+        token_path = PathAppend(model_path.at(MODEL_DIR), TOKEN_PATH);
 
         Model *mm;
         mm = new Paraformer();
-        mm->InitAsr(am_model_path, am_cmvn_path, am_config_path, thread_num);
+        mm->InitAsr(am_model_path, am_cmvn_path, am_config_path, token_path, thread_num);
         return mm;
     }else if(type == ASR_ONLINE){
         // online
@@ -26,6 +28,7 @@
         string de_model_path;
         string am_cmvn_path;
         string am_config_path;
+        string token_path;
 
         en_model_path = PathAppend(model_path.at(MODEL_DIR), ENCODER_NAME);
         de_model_path = PathAppend(model_path.at(MODEL_DIR), DECODER_NAME);
@@ -35,10 +38,11 @@
         }
         am_cmvn_path = PathAppend(model_path.at(MODEL_DIR), AM_CMVN_NAME);
         am_config_path = PathAppend(model_path.at(MODEL_DIR), AM_CONFIG_NAME);
+        token_path = PathAppend(model_path.at(MODEL_DIR), TOKEN_PATH);
 
         Model *mm;
         mm = new Paraformer();
-        mm->InitAsr(en_model_path, de_model_path, am_cmvn_path, am_config_path, thread_num);
+        mm->InitAsr(en_model_path, de_model_path, am_cmvn_path, am_config_path, token_path, thread_num);
         return mm;
     }else{
         LOG(ERROR)<<"Wrong ASR_TYPE : " << type;
diff --git a/runtime/onnxruntime/src/offline-stream.cpp b/runtime/onnxruntime/src/offline-stream.cpp
index ae8cf18..7d86f9b 100644
--- a/runtime/onnxruntime/src/offline-stream.cpp
+++ b/runtime/onnxruntime/src/offline-stream.cpp
@@ -32,6 +32,7 @@
         string am_model_path;
         string am_cmvn_path;
         string am_config_path;
+        string token_path;
         string hw_compile_model_path;
         string seg_dict_path;
     
@@ -57,8 +58,9 @@
         }
         am_cmvn_path = PathAppend(model_path.at(MODEL_DIR), AM_CMVN_NAME);
         am_config_path = PathAppend(model_path.at(MODEL_DIR), AM_CONFIG_NAME);
+        token_path = PathAppend(model_path.at(MODEL_DIR), TOKEN_PATH);
 
-        asr_handle->InitAsr(am_model_path, am_cmvn_path, am_config_path, thread_num);
+        asr_handle->InitAsr(am_model_path, am_cmvn_path, am_config_path, token_path, thread_num);
     }
 
     // Lm resource
@@ -79,20 +81,23 @@
     if(model_path.find(PUNC_DIR) != model_path.end()){
         string punc_model_path;
         string punc_config_path;
+        string token_path;
     
         punc_model_path = PathAppend(model_path.at(PUNC_DIR), MODEL_NAME);
         if(model_path.find(PUNC_QUANT) != model_path.end() && model_path.at(PUNC_QUANT) == "true"){
             punc_model_path = PathAppend(model_path.at(PUNC_DIR), QUANT_MODEL_NAME);
         }
         punc_config_path = PathAppend(model_path.at(PUNC_DIR), PUNC_CONFIG_NAME);
+        token_path = PathAppend(model_path.at(PUNC_DIR), TOKEN_PATH);
 
         if (access(punc_model_path.c_str(), F_OK) != 0 ||
-            access(punc_config_path.c_str(), F_OK) != 0 )
+            access(punc_config_path.c_str(), F_OK) != 0 ||
+            access(token_path.c_str(), F_OK) != 0)
         {
             LOG(INFO) << "PUNC model file is not exist, skip load punc model.";
         }else{
             punc_handle = make_unique<CTTransformer>();
-            punc_handle->InitPunc(punc_model_path, punc_config_path, thread_num);
+            punc_handle->InitPunc(punc_model_path, punc_config_path, token_path, thread_num);
             use_punc = true;
         }
     }
diff --git a/runtime/onnxruntime/src/paraformer.cpp b/runtime/onnxruntime/src/paraformer.cpp
index c56421c..a57fb9b 100644
--- a/runtime/onnxruntime/src/paraformer.cpp
+++ b/runtime/onnxruntime/src/paraformer.cpp
@@ -18,7 +18,7 @@
 }
 
 // offline
-void Paraformer::InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
+void Paraformer::InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num){
     LoadConfigFromYaml(am_config.c_str());
     // knf options
     fbank_opts_.frame_opts.dither = 0;
@@ -65,13 +65,13 @@
         m_szInputNames.push_back(item.c_str());
     for (auto& item : m_strOutputNames)
         m_szOutputNames.push_back(item.c_str());
-    vocab = new Vocab(am_config.c_str());
-	phone_set_ = new PhoneSet(am_config.c_str());
+    vocab = new Vocab(token_file.c_str());
+	phone_set_ = new PhoneSet(token_file.c_str());
     LoadCmvn(am_cmvn.c_str());
 }
 
 // online
-void Paraformer::InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
+void Paraformer::InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num){
     
     LoadOnlineConfigFromYaml(am_config.c_str());
     // knf options
@@ -143,15 +143,15 @@
     for (auto& item : de_strOutputNames)
         de_szOutputNames_.push_back(item.c_str());
 
-    vocab = new Vocab(am_config.c_str());
-    phone_set_ = new PhoneSet(am_config.c_str());
+    vocab = new Vocab(token_file.c_str());
+    phone_set_ = new PhoneSet(token_file.c_str());
     LoadCmvn(am_cmvn.c_str());
 }
 
 // 2pass
-void Paraformer::InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
+void Paraformer::InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num){
     // online
-    InitAsr(en_model, de_model, am_cmvn, am_config, thread_num);
+    InitAsr(en_model, de_model, am_cmvn, am_config, token_file, thread_num);
 
     // offline
     try {
diff --git a/runtime/onnxruntime/src/paraformer.h b/runtime/onnxruntime/src/paraformer.h
index 5bb9477..417c2d7 100644
--- a/runtime/onnxruntime/src/paraformer.h
+++ b/runtime/onnxruntime/src/paraformer.h
@@ -42,11 +42,11 @@
     public:
         Paraformer();
         ~Paraformer();
-        void InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, int thread_num);
+        void InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num);
         // online
-        void InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num);
+        void InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num);
         // 2pass
-        void InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num);
+        void InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, const std::string &token_file, int thread_num);
         void InitHwCompiler(const std::string &hw_model, int thread_num);
         void InitSegDict(const std::string &seg_dict_model);
         std::vector<std::vector<float>> CompileHotwordEmbedding(std::string &hotwords);
diff --git a/runtime/onnxruntime/src/phone-set.cpp b/runtime/onnxruntime/src/phone-set.cpp
index 167fa01..60eb101 100644
--- a/runtime/onnxruntime/src/phone-set.cpp
+++ b/runtime/onnxruntime/src/phone-set.cpp
@@ -13,7 +13,7 @@
 namespace funasr {
 PhoneSet::PhoneSet(const char *filename) {
   ifstream in(filename);
-  LoadPhoneSetFromYaml(filename);
+  LoadPhoneSetFromJson(filename);
 }
 PhoneSet::~PhoneSet()
 {
@@ -35,6 +35,25 @@
   }
 }
 
+void PhoneSet::LoadPhoneSetFromJson(const char* filename) {
+    nlohmann::json json_array;
+    std::ifstream file(filename);
+    if (file.is_open()) {
+        file >> json_array;
+        file.close();
+    } else {
+        LOG(INFO) << "Error loading token file, token file error or not exist.";
+        exit(-1);
+    }
+
+    int id = 0;
+    for (const auto& element : json_array) {
+        phone_.push_back(element);
+        phn2Id_.emplace(element, id);
+        id++;
+    }
+}
+
 int PhoneSet::Size() const {
   return phone_.size();
 }
diff --git a/runtime/onnxruntime/src/phone-set.h b/runtime/onnxruntime/src/phone-set.h
index 9729104..cb1a2a7 100644
--- a/runtime/onnxruntime/src/phone-set.h
+++ b/runtime/onnxruntime/src/phone-set.h
@@ -5,6 +5,7 @@
 #include <string>
 #include <vector>
 #include <unordered_map>
+#include "nlohmann/json.hpp"
 #define UNIT_BEG_SIL_SYMBOL "<s>"
 #define UNIT_END_SIL_SYMBOL "</s>"
 #define UNIT_BLK_SYMBOL "<blank>"
@@ -28,6 +29,7 @@
     vector<string> phone_;
     unordered_map<string, int> phn2Id_;
     void LoadPhoneSetFromYaml(const char* filename);
+    void LoadPhoneSetFromJson(const char* filename);
 };
 
 } // namespace funasr
diff --git a/runtime/onnxruntime/src/punc-model.cpp b/runtime/onnxruntime/src/punc-model.cpp
index 54b8d6a..9af03db 100644
--- a/runtime/onnxruntime/src/punc-model.cpp
+++ b/runtime/onnxruntime/src/punc-model.cpp
@@ -14,14 +14,16 @@
     }
     string punc_model_path;
     string punc_config_path;
+    string token_file;
 
     punc_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_NAME);
     if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
         punc_model_path = PathAppend(model_path.at(MODEL_DIR), QUANT_MODEL_NAME);
     }
     punc_config_path = PathAppend(model_path.at(MODEL_DIR), PUNC_CONFIG_NAME);
+    token_file = PathAppend(model_path.at(MODEL_DIR), TOKEN_PATH);
 
-    mm->InitPunc(punc_model_path, punc_config_path, thread_num);
+    mm->InitPunc(punc_model_path, punc_config_path, token_file, thread_num);
     return mm;
 }
 
diff --git a/runtime/onnxruntime/src/tokenizer.cpp b/runtime/onnxruntime/src/tokenizer.cpp
index 7618282..06d64d8 100644
--- a/runtime/onnxruntime/src/tokenizer.cpp
+++ b/runtime/onnxruntime/src/tokenizer.cpp
@@ -127,6 +127,61 @@
 	return m_ready;
 }
 
+bool CTokenizer::OpenYaml(const char* sz_yamlfile, const char* token_file)
+{
+	YAML::Node m_Config;
+	try{
+		m_Config = YAML::LoadFile(sz_yamlfile);
+	}catch(exception const &e){
+        LOG(INFO) << "Error loading file, yaml file error or not exist.";
+        exit(-1);
+    }
+
+	try
+	{
+		YAML::Node conf_seg_jieba = m_Config["seg_jieba"];
+        if (conf_seg_jieba.IsDefined()){
+            seg_jieba = conf_seg_jieba.as<bool>();
+        }
+
+		auto Puncs = m_Config["model_conf"]["punc_list"];
+		if (Puncs.IsSequence())
+		{
+			for (size_t i = 0; i < Puncs.size(); ++i)
+			{
+				if (Puncs[i].IsScalar())
+				{ 
+					m_id2punc.push_back(Puncs[i].as<string>());
+					m_punc2id.insert(make_pair<string, int>(Puncs[i].as<string>(), i));
+				}
+			}
+		}
+
+		nlohmann::json json_array;
+		std::ifstream file(token_file);
+		if (file.is_open()) {
+			file >> json_array;
+			file.close();
+		} else {
+			LOG(INFO) << "Error loading token file, token file error or not exist.";
+			return  false;
+		}
+
+		int i = 0;
+		for (const auto& element : json_array) {
+			m_id2token.push_back(element);
+			m_token2id[element] = i;
+			i++;
+		}
+	}
+	catch (YAML::BadFile& e) {
+		LOG(ERROR) << "Read error!";
+		return  false;
+	}
+	m_ready = true;
+	return m_ready;
+}
+
 vector<string> CTokenizer::Id2String(vector<int> input)
 {
 	vector<string> result;
diff --git a/runtime/onnxruntime/src/tokenizer.h b/runtime/onnxruntime/src/tokenizer.h
index 166061b..81aea7e 100644
--- a/runtime/onnxruntime/src/tokenizer.h
+++ b/runtime/onnxruntime/src/tokenizer.h
@@ -8,6 +8,7 @@
 #include "cppjieba/DictTrie.hpp"
 #include "cppjieba/HMMModel.hpp"
 #include "cppjieba/Jieba.hpp"
+#include "nlohmann/json.hpp"
 
 namespace funasr {
 class CTokenizer {
@@ -27,6 +28,7 @@
 	CTokenizer();
 	~CTokenizer();
 	bool OpenYaml(const char* sz_yamlfile);
+	bool OpenYaml(const char* sz_yamlfile, const char* token_file);
 	void ReadYaml(const YAML::Node& node);
 	vector<string> Id2String(vector<int> input);
 	vector<int> String2Ids(vector<string> input);
diff --git a/runtime/onnxruntime/src/tpass-stream.cpp b/runtime/onnxruntime/src/tpass-stream.cpp
index b723e0f..7681a4d 100644
--- a/runtime/onnxruntime/src/tpass-stream.cpp
+++ b/runtime/onnxruntime/src/tpass-stream.cpp
@@ -35,6 +35,7 @@
         string de_model_path;
         string am_cmvn_path;
         string am_config_path;
+        string token_path;
         string hw_compile_model_path;
         string seg_dict_path;
         
@@ -60,8 +61,9 @@
         }
         am_cmvn_path = PathAppend(model_path.at(ONLINE_MODEL_DIR), AM_CMVN_NAME);
         am_config_path = PathAppend(model_path.at(ONLINE_MODEL_DIR), AM_CONFIG_NAME);
+        token_path = PathAppend(model_path.at(MODEL_DIR), TOKEN_PATH);
 
-        asr_handle->InitAsr(am_model_path, en_model_path, de_model_path, am_cmvn_path, am_config_path, thread_num);
+        asr_handle->InitAsr(am_model_path, en_model_path, de_model_path, am_cmvn_path, am_config_path, token_path, thread_num);
     }else{
         LOG(ERROR) <<"Can not find offline-model-dir or online-model-dir";
         exit(-1);
@@ -85,20 +87,23 @@
     if(model_path.find(PUNC_DIR) != model_path.end()){
         string punc_model_path;
         string punc_config_path;
+        string token_path;
     
         punc_model_path = PathAppend(model_path.at(PUNC_DIR), MODEL_NAME);
         if(model_path.find(PUNC_QUANT) != model_path.end() && model_path.at(PUNC_QUANT) == "true"){
             punc_model_path = PathAppend(model_path.at(PUNC_DIR), QUANT_MODEL_NAME);
         }
         punc_config_path = PathAppend(model_path.at(PUNC_DIR), PUNC_CONFIG_NAME);
+        token_path = PathAppend(model_path.at(PUNC_DIR), TOKEN_PATH);
 
         if (access(punc_model_path.c_str(), F_OK) != 0 ||
-            access(punc_config_path.c_str(), F_OK) != 0 )
+            access(punc_config_path.c_str(), F_OK) != 0 ||
+            access(token_path.c_str(), F_OK) != 0)
         {
             LOG(INFO) << "PUNC model file is not exist, skip load punc model.";
         }else{
             punc_online_handle = make_unique<CTTransformerOnline>();
-            punc_online_handle->InitPunc(punc_model_path, punc_config_path, thread_num);
+            punc_online_handle->InitPunc(punc_model_path, punc_config_path, token_path, thread_num);
             use_punc = true;
         }
     }
diff --git a/runtime/onnxruntime/src/vocab.cpp b/runtime/onnxruntime/src/vocab.cpp
index 6991376..1416dd3 100644
--- a/runtime/onnxruntime/src/vocab.cpp
+++ b/runtime/onnxruntime/src/vocab.cpp
@@ -14,7 +14,7 @@
 Vocab::Vocab(const char *filename)
 {
     ifstream in(filename);
-    LoadVocabFromYaml(filename);
+    LoadVocabFromJson(filename);
 }
 Vocab::Vocab(const char *filename, const char *lex_file)
 {
@@ -43,6 +43,25 @@
     }
 }
 
+void Vocab::LoadVocabFromJson(const char* filename){
+    nlohmann::json json_array;
+    std::ifstream file(filename);
+    if (file.is_open()) {
+        file >> json_array;
+        file.close();
+    } else {
+        LOG(INFO) << "Error loading token file, token file error or not exist.";
+        exit(-1);
+    }
+
+    int i = 0;
+    for (const auto& element : json_array) {
+        vocab.push_back(element);
+        token_id[element] = i;
+        i++;
+    }
+}
+
 void Vocab::LoadLex(const char* filename){
     std::ifstream file(filename);
     std::string line;
diff --git a/runtime/onnxruntime/src/vocab.h b/runtime/onnxruntime/src/vocab.h
index 19e3648..36fabf4 100644
--- a/runtime/onnxruntime/src/vocab.h
+++ b/runtime/onnxruntime/src/vocab.h
@@ -6,6 +6,7 @@
 #include <string>
 #include <vector>
 #include <map>
+#include "nlohmann/json.hpp"
 using namespace std;
 
 namespace funasr {
@@ -16,6 +17,7 @@
     std::map<string, string> lex_map;
     bool IsEnglish(string ch);
     void LoadVocabFromYaml(const char* filename);
+    void LoadVocabFromJson(const char* filename);
     void LoadLex(const char* filename);
 
   public:
diff --git a/runtime/python/http/server.py b/runtime/python/http/server.py
index 720d4ce..8bff9c9 100644
--- a/runtime/python/http/server.py
+++ b/runtime/python/http/server.py
@@ -117,7 +117,7 @@
         for sentence in rec_result["sentence_info"]:
             # 姣忓彞璇濈殑鏃堕棿鎴�
             sentences.append(
-                {"text": sentence["text"], "start": sentence["start"], "end": sentence["start"]}
+                {"text": sentence["text"], "start": sentence["start"], "end": sentence["end"]}
             )
         ret = {"text": text, "sentences": sentences, "code": 0}
         logger.info(f"璇嗗埆缁撴灉锛歿ret}")
diff --git a/runtime/quick_start.md b/runtime/quick_start.md
index 44ca41a..d0f7c0e 100644
--- a/runtime/quick_start.md
+++ b/runtime/quick_start.md
@@ -47,11 +47,11 @@
 
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10096:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 ```
 
 ###### Server Start
@@ -93,11 +93,11 @@
 
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10095:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 ```
 
 ###### Server Start
diff --git a/runtime/quick_start_zh.md b/runtime/quick_start_zh.md
index 0bbe04e..5566020 100644
--- a/runtime/quick_start_zh.md
+++ b/runtime/quick_start_zh.md
@@ -48,11 +48,11 @@
 
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10096:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.9
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.10
 ```
 
 ###### 鏈嶅姟绔惎鍔�
@@ -92,11 +92,11 @@
 
 ```shell
 sudo docker pull \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 mkdir -p ./funasr-runtime-resources/models
 sudo docker run -p 10095:10095 -it --privileged=true \
   -v $PWD/funasr-runtime-resources/models:/workspace/models \
-  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.4
+  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-cpu-0.4.5
 ```
 
 ###### 鏈嶅姟绔惎鍔�
diff --git a/runtime/readme.md b/runtime/readme.md
index 2f6546b..28a063d 100644
--- a/runtime/readme.md
+++ b/runtime/readme.md
@@ -17,6 +17,7 @@
 To meet the needs of different users, we have prepared different tutorials with text and images for both novice and advanced developers.
 
 ### Whats-new
+- 2024/05/15: Adapting to FunASR 1.0 model structure, docker image version funasr-runtime-sdk-en-cpu-0.1.6 (84d781d07997).
 - 2024/03/05: docker image supports ARM64 platform, update modelscope, docker image version funasr-runtime-sdk-en-cpu-0.1.5 (7cca2abc5901).
 - 2024/01/25: Optimized the VAD (Voice Activity Detection) data processing method,significantly reducing peak memory usage,memory leak optimization, docker image version funasr-runtime-sdk-en-cpu-0.1.3 (c00f9ce7a195).
 - 2024/01/03: Fixed known crash issues as well as memory leak problems, docker image version funasr-runtime-sdk-en-cpu-0.1.2 (0cdd9f4a4bb5).
@@ -42,6 +43,7 @@
 In order to meet the needs of different users for different scenarios, different tutorials are prepared:
 
 ### Whats-new
+- 2024/05/15: Real-time Transcription Service 1.10 released, adapting to FunASR 1.0 model structure, docker image version funasr-runtime-sdk-online-cpu-0.1.10 (1c2adfcff84d)
 - 2024/03/05: Real-time Transcription Service 1.9 released锛宒ocker image supports ARM64 platform, update modelscope, docker image version funasr-runtime-sdk-online-cpu-0.1.9 (4a875e08c7a2)
 - 2024/01/25: Real-time Transcription Service 1.7 released锛宱ptimization of the client-side, docker image version funasr-runtime-sdk-online-cpu-0.1.7 (2aa23805572e)
 - 2024/01/03: Real-time Transcription Service 1.6 released锛孴he 2pass-offline mode supports Ngram language model decoding and WFST hotwords, while also addressing known crash issues and memory leak problems, docker image version funasr-runtime-sdk-online-cpu-0.1.6 (f99925110d27)
@@ -72,6 +74,7 @@
 To meet the needs of different users, we have prepared different tutorials with text and images for both novice and advanced developers.
 
 ### Whats-new
+- 2024/05/15: File Transcription Service 4.5 released, adapting to FunASR 1.0 model structure, docker image version funasr-runtime-sdk-cpu-0.4.5 (058b9882ae67)
 - 2024/03/05: File Transcription Service 4.4 released, docker image supports ARM64 platform, update modelscope, docker image version funasr-runtime-sdk-cpu-0.4.4 (2dc87b86dc49)
 - 2024/01/25: File Transcription Service 4.2 released, optimized the VAD (Voice Activity Detection) data processing method, significantly reducing peak memory usage, memory leak optimization, docker image version funasr-runtime-sdk-cpu-0.4.2 (befdc7b179ed)
 - 2024/01/08: File Transcription Service 4.1 released, optimized format sentence-level timestamps, docker image version funasr-runtime-sdk-cpu-0.4.1 (0250f8ef981b)
diff --git a/runtime/readme_cn.md b/runtime/readme_cn.md
index 13cd8eb..9cb7b58 100644
--- a/runtime/readme_cn.md
+++ b/runtime/readme_cn.md
@@ -19,6 +19,7 @@
 涓轰簡鏀寔涓嶅悓鐢ㄦ埛鐨勯渶姹傦紝閽堝涓嶅悓鍦烘櫙锛屽噯澶囦簡涓嶅悓鐨勫浘鏂囨暀绋嬶細
 
 ### 鏈�鏂板姩鎬�
+- 2024/05/15:   英文离线文件转写服务 1.6 发布，适配FunASR 1.0模型结构，docker镜像版本funasr-runtime-sdk-en-cpu-0.1.6 (84d781d07997)
 - 2024/03/05:   鑻辨枃绂荤嚎鏂囦欢杞啓鏈嶅姟 1.5 鍙戝竷锛宒ocker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-en-cpu-0.1.5 (7cca2abc5901)
 - 2024/01/25:   鑻辨枃绂荤嚎鏂囦欢杞啓鏈嶅姟 1.3 鍙戝竷锛屼紭鍖杤ad鏁版嵁澶勭悊鏂瑰紡锛屽ぇ骞呴檷浣庡嘲鍊煎唴瀛樺崰鐢紝鍐呭瓨娉勬紡浼樺寲锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-en-cpu-0.1.3 (c00f9ce7a195)
 - 2024/01/03:   鑻辨枃绂荤嚎鏂囦欢杞啓鏈嶅姟 1.2 鍙戝竷锛屼慨澶嶅凡鐭ョ殑crash闂鍙婂唴瀛樻硠婕忛棶棰橈紝dokcer闀滃儚鐗堟湰funasr-runtime-sdk-en-cpu-0.1.2 (0cdd9f4a4bb5)
@@ -36,6 +37,7 @@
 涓轰簡鏀寔涓嶅悓鐢ㄦ埛鐨勯渶姹傦紝閽堝涓嶅悓鍦烘櫙锛屽噯澶囦簡涓嶅悓鐨勫浘鏂囨暀绋嬶細
 
 ### 鏈�鏂板姩鎬�
+- 2024/05/15:   涓枃瀹炴椂璇煶鍚啓鏈嶅姟 1.10 鍙戝竷锛岄�傞厤FunASR 1.0妯″瀷缁撴瀯锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-online-cpu-0.1.10 (1c2adfcff84d)
 - 2024/03/05:   涓枃瀹炴椂璇煶鍚啓鏈嶅姟 1.9 鍙戝竷锛宒ocker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-online-cpu-0.1.9 (4a875e08c7a2)
 - 2024/01/25:   涓枃瀹炴椂璇煶鍚啓鏈嶅姟 1.7 鍙戝竷锛屽鎴风浼樺寲锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-online-cpu-0.1.7 (2aa23805572e)
 - 2024/01/03:   涓枃瀹炴椂璇煶鍚啓鏈嶅姟 1.6 鍙戝竷锛�2pass-offline妯″紡鏀寔Ngram璇█妯″瀷瑙g爜銆亀fst鐑瘝锛屽悓鏃朵慨澶嶅凡鐭ョ殑crash闂鍙婂唴瀛樻硠婕忛棶棰橈紝dokcer闀滃儚鐗堟湰funasr-runtime-sdk-online-cpu-0.1.6 (f99925110d27)
@@ -58,6 +60,7 @@
 涓轰簡鏀寔涓嶅悓鐢ㄦ埛鐨勯渶姹傦紝閽堝涓嶅悓鍦烘櫙锛屽噯澶囦簡涓嶅悓鐨勫浘鏂囨暀绋嬶細
 
 ### 鏈�鏂板姩鎬�
+- 2024/05/15:   涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.5 鍙戝竷锛岄�傞厤FunASR 1.0妯″瀷缁撴瀯锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-cpu-0.4.5 (058b9882ae67)
 - 2024/03/05:   涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.4 鍙戝竷锛宒ocker闀滃儚鏀寔arm64骞冲彴锛屽崌绾odelscope鐗堟湰锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-cpu-0.4.4 (2dc87b86dc49)
 - 2024/01/25:   涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.2 鍙戝竷锛屼紭鍖杤ad鏁版嵁澶勭悊鏂瑰紡锛屽ぇ骞呴檷浣庡嘲鍊煎唴瀛樺崰鐢紝鍐呭瓨娉勬紡浼樺寲锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-cpu-0.4.2 (befdc7b179ed)
 - 2024/01/08:   涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.1 鍙戝竷锛屼紭鍖栧彞瀛愮骇鏃堕棿鎴砵son鏍煎紡锛宒okcer闀滃儚鐗堟湰funasr-runtime-sdk-cpu-0.4.1 (0250f8ef981b)
diff --git a/runtime/websocket/bin/funasr-wss-server-2pass.cpp b/runtime/websocket/bin/funasr-wss-server-2pass.cpp
index 4bc413c..d42679b 100644
--- a/runtime/websocket/bin/funasr-wss-server-2pass.cpp
+++ b/runtime/websocket/bin/funasr-wss-server-2pass.cpp
@@ -55,11 +55,11 @@
 
     TCLAP::ValueArg<std::string> offline_model_revision(
         "", "offline-model-revision", "ASR offline model revision", false,
-        "v1.2.1", "string");
+        "v2.0.4", "string");
 
     TCLAP::ValueArg<std::string> online_model_revision(
         "", "online-model-revision", "ASR online model revision", false,
-        "v1.0.6", "string");
+        "v2.0.4", "string");
 
     TCLAP::ValueArg<std::string> quantize(
         "", QUANTIZE,
@@ -73,7 +73,7 @@
         "model_quant.onnx, vad.yaml, vad.mvn",
         false, "damo/speech_fsmn_vad_zh-cn-16k-common-onnx", "string");
     TCLAP::ValueArg<std::string> vad_revision(
-        "", "vad-revision", "VAD model revision", false, "v1.2.0", "string");
+        "", "vad-revision", "VAD model revision", false, "v2.0.4", "string");
     TCLAP::ValueArg<std::string> vad_quant(
         "", VAD_QUANT,
         "true (Default), load the model of model_quant.onnx in vad_dir. If set "
@@ -85,7 +85,7 @@
         "model_quant.onnx, punc.yaml",
         false, "damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727-onnx", "string");
     TCLAP::ValueArg<std::string> punc_revision(
-        "", "punc-revision", "PUNC model revision", false, "v1.0.2", "string");
+        "", "punc-revision", "PUNC model revision", false, "v2.0.4", "string");
     TCLAP::ValueArg<std::string> punc_quant(
         "", PUNC_QUANT,
         "true (Default), load the model of model_quant.onnx in punc_dir. If "
@@ -262,17 +262,17 @@
 
         size_t found = s_offline_asr_path.find("speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404");
         if (found != std::string::npos) {
-            model_path["offline-model-revision"]="v1.2.4";
+            model_path["offline-model-revision"]="v2.0.4";
         }
 
         found = s_offline_asr_path.find("speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404");
         if (found != std::string::npos) {
-            model_path["offline-model-revision"]="v1.0.5";
+            model_path["offline-model-revision"]="v2.0.5";
         }
 
         found = s_offline_asr_path.find("speech_paraformer-large_asr_nat-en-16k-common-vocab10020");
         if (found != std::string::npos) {
-            model_path["model-revision"]="v1.0.0";
+            model_path["model-revision"]="v2.0.4";
             s_itn_path="";
             s_lm_path="";
         }
diff --git a/runtime/websocket/bin/funasr-wss-server.cpp b/runtime/websocket/bin/funasr-wss-server.cpp
index bff4f66..5bb7def 100644
--- a/runtime/websocket/bin/funasr-wss-server.cpp
+++ b/runtime/websocket/bin/funasr-wss-server.cpp
@@ -50,7 +50,7 @@
     TCLAP::ValueArg<std::string> model_revision(
         "", "model-revision",
         "ASR model revision",
-        false, "v1.2.1", "string");
+        false, "v2.0.4", "string");
     TCLAP::ValueArg<std::string> quantize(
         "", QUANTIZE,
         "true (Default), load the model of model_quant.onnx in model_dir. If set "
@@ -63,7 +63,7 @@
     TCLAP::ValueArg<std::string> vad_revision(
         "", "vad-revision",
         "VAD model revision",
-        false, "v1.2.0", "string");
+        false, "v2.0.4", "string");
     TCLAP::ValueArg<std::string> vad_quant(
         "", VAD_QUANT,
         "true (Default), load the model of model_quant.onnx in vad_dir. If set "
@@ -77,7 +77,7 @@
     TCLAP::ValueArg<std::string> punc_revision(
         "", "punc-revision",
         "PUNC model revision",
-        false, "v1.1.7", "string");
+        false, "v2.0.4", "string");
     TCLAP::ValueArg<std::string> punc_quant(
         "", PUNC_QUANT,
         "true (Default), load the model of model_quant.onnx in punc_dir. If set "
@@ -233,17 +233,17 @@
             // modify model-revision by model name
             size_t found = s_asr_path.find("speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404");
             if (found != std::string::npos) {
-                model_path["model-revision"]="v1.2.4";
+                model_path["model-revision"]="v2.0.4";
             }
 
             found = s_asr_path.find("speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404");
             if (found != std::string::npos) {
-                model_path["model-revision"]="v1.0.5";
+                model_path["model-revision"]="v2.0.5";
             }
 
             found = s_asr_path.find("speech_paraformer-large_asr_nat-en-16k-common-vocab10020");
             if (found != std::string::npos) {
-                model_path["model-revision"]="v1.0.0";
+                model_path["model-revision"]="v2.0.4";
                 s_itn_path="";
                 s_lm_path="";
             }

--
Gitblit v1.9.1