From dd3e3f4c92c5cf5e55e98a359c77dfdf396be055 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Thu, 29 Jun 2023 15:16:04 +0800
Subject: [PATCH] update readme

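Fix the --quantize, --vad-quant and --punc-quant descriptions in
funasr-wss-server.cpp: the quantized model (model_quant.onnx) is loaded by
default, and model.onnx is only loaded when the option is set to false.
Document the --download-model-dir option in readme.md and add examples for
running the server with models downloaded from ModelScope or with local
model paths.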
---
 funasr/runtime/websocket/funasr-wss-server.cpp |   18 +++++++++---------
 funasr/runtime/websocket/readme.md             |   33 +++++++++++++++++++++++++--------
 2 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/funasr/runtime/websocket/funasr-wss-server.cpp b/funasr/runtime/websocket/funasr-wss-server.cpp
index fd43b8d..5c8aba0 100644
--- a/funasr/runtime/websocket/funasr-wss-server.cpp
+++ b/funasr/runtime/websocket/funasr-wss-server.cpp
@@ -32,7 +32,7 @@
         false, "", "string");
     TCLAP::ValueArg<std::string> model_dir(
         "", MODEL_DIR,
-        "default: /workspace/models/asr, the asr model path, which contains model.onnx, config.yaml, am.mvn",
+        "default: /workspace/models/asr, the asr model path, which contains model_quant.onnx, config.yaml, am.mvn",
         false, "/workspace/models/asr", "string");
     TCLAP::ValueArg<std::string> model_revision(
         "", "model-revision",
@@ -40,12 +40,12 @@
         false, "v1.2.1", "string");
     TCLAP::ValueArg<std::string> quantize(
         "", QUANTIZE,
-        "true (Default), load the model of model.onnx in model_dir. If set "
-        "true, load the model of model_quant.onnx in model_dir",
+        "true (Default), load the model of model_quant.onnx in model_dir. If set "
+        "false, load the model of model.onnx in model_dir",
         false, "true", "string");
     TCLAP::ValueArg<std::string> vad_dir(
         "", VAD_DIR,
-        "default: /workspace/models/vad, the vad model path, which contains model.onnx, vad.yaml, vad.mvn",
+        "default: /workspace/models/vad, the vad model path, which contains model_quant.onnx, vad.yaml, vad.mvn",
         false, "/workspace/models/vad", "string");
     TCLAP::ValueArg<std::string> vad_revision(
         "", "vad-revision",
@@ -53,12 +53,12 @@
         false, "v1.2.0", "string");
     TCLAP::ValueArg<std::string> vad_quant(
         "", VAD_QUANT,
-        "true (Default), load the model of model.onnx in vad_dir. If set "
-        "true, load the model of model_quant.onnx in vad_dir",
+        "true (Default), load the model of model_quant.onnx in vad_dir. If set "
+        "false, load the model of model.onnx in vad_dir",
         false, "true", "string");
     TCLAP::ValueArg<std::string> punc_dir(
         "", PUNC_DIR,
-        "default: /workspace/models/punc, the punc model path, which contains model.onnx, punc.yaml", 
+        "default: /workspace/models/punc, the punc model path, which contains model_quant.onnx, punc.yaml", 
         false, "/workspace/models/punc",
         "string");
     TCLAP::ValueArg<std::string> punc_revision(
@@ -67,8 +67,8 @@
         false, "v1.1.7", "string");
     TCLAP::ValueArg<std::string> punc_quant(
         "", PUNC_QUANT,
-        "true (Default), load the model of model.onnx in punc_dir. If set "
-        "true, load the model of model_quant.onnx in punc_dir",
+        "true (Default), load the model of model_quant.onnx in punc_dir. If set "
+        "false, load the model of model.onnx in punc_dir",
         false, "true", "string");
 
     TCLAP::ValueArg<std::string> listen_ip("", "listen-ip", "listen ip", false,
diff --git a/funasr/runtime/websocket/readme.md b/funasr/runtime/websocket/readme.md
index 98cf64f..c97c63b 100644
--- a/funasr/runtime/websocket/readme.md
+++ b/funasr/runtime/websocket/readme.md
@@ -49,27 +49,31 @@
 
 ```shell
 cd bin
-./funasr-wss-server  [--model-thread-num <int>] [--decoder-thread-num <int>]
+./funasr-wss-server [--download-model-dir <string>]
+                    [--model-thread-num <int>] [--decoder-thread-num <int>]
                     [--io-thread-num <int>] [--port <int>] [--listen_ip
                     <string>] [--punc-quant <string>] [--punc-dir <string>]
                     [--vad-quant <string>] [--vad-dir <string>] [--quantize
                     <string>] --model-dir <string> [--keyfile <string>]
                     [--certfile <string>] [--] [--version] [-h]
 Where:
+   --download-model-dir <string>
+     Download model from Modelscope to download_model_dir
+
    --model-dir <string>
-     default: /workspace/models/asr, the asr model path, which contains model.onnx, config.yaml, am.mvn
+     default: /workspace/models/asr, the asr model path, which contains model_quant.onnx, config.yaml, am.mvn
    --quantize <string>
-     true (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir
+     true (Default), load model_quant.onnx in model_dir. If set to false, load model.onnx in model_dir
 
    --vad-dir <string>
-     default: /workspace/models/vad, the vad model path, which contains model.onnx, vad.yaml, vad.mvn
+     default: /workspace/models/vad, the vad model path, which contains model_quant.onnx, vad.yaml, vad.mvn
    --vad-quant <string>
-     true (Default), load the model of model.onnx in vad_dir. If set true, load the model of model_quant.onnx in vad_dir
+     true (Default), load model_quant.onnx in vad_dir. If set to false, load model.onnx in vad_dir
 
    --punc-dir <string>
-     default: /workspace/models/punc, the punc model path, which contains model.onnx, punc.yaml
+     default: /workspace/models/punc, the punc model path, which contains model_quant.onnx, punc.yaml
    --punc-quant <string>
-     true (Default), load the model of model.onnx in punc_dir. If set true, load the model of model_quant.onnx in punc_dir
+     true (Default), load model_quant.onnx in punc_dir. If set to false, load model.onnx in punc_dir
 
    --decoder-thread-num <int>
      number of threads for decoder, default:8
@@ -83,7 +87,20 @@
      default: ../../../ssl_key/server.key, path of keyfile for WSS connection
   
 example:
-./funasr-wss-server --model-dir /FunASR/funasr/runtime/onnxruntime/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+# You can use models downloaded from ModelScope or local models.
+# Download models from ModelScope:
+./funasr-wss-server \
+  --download-model-dir /workspace/models \
+  --model-dir damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
+  --vad-dir damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
+  --punc-dir damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
+
+# Load models from local paths:
+./funasr-wss-server \
+  --model-dir /workspace/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
+  --vad-dir /workspace/models/damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
+  --punc-dir /workspace/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
+
 ```
 
 ## Run websocket client test

--
Gitblit v1.9.1