From bc723ea200144bd6fa8a5dff4b9a780feda144fc Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 29 Jun 2023 18:55:01 +0800
Subject: [PATCH] docs

---
 funasr/runtime/websocket/readme.md |   57 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 21 deletions(-)

diff --git a/funasr/runtime/websocket/readme.md b/funasr/runtime/websocket/readme.md
index 0cebe64..c97c63b 100644
--- a/funasr/runtime/websocket/readme.md
+++ b/funasr/runtime/websocket/readme.md
@@ -5,15 +5,15 @@
 
 ```shell
 # pip3 install torch torchaudio
-pip install -U modelscope funasr
+pip3 install -U modelscope funasr
 # For the users in China, you could install with the command:
-# pip install -U modelscope funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://mirror.sjtu.edu.cn/pypi/web/simple
+# pip3 install -U modelscope funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://mirror.sjtu.edu.cn/pypi/web/simple
 ```
 
 ### Export [onnx model](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/export)
 
 ```shell
-python -m funasr.export.export_model --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch --export-dir ./export --type onnx --quantize True
+python3 -m funasr.export.export_model --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch --export-dir ./export --type onnx --quantize True
 ```
 
 ## Building for Linux/Unix
@@ -36,13 +36,11 @@
 required openssl lib
 
 ```shell
-#install openssl lib for ubuntu 
-apt-get install libssl-dev
-#install openssl lib for centos
-yum install openssl-devel
+apt-get install libssl-dev #ubuntu 
+# yum install openssl-devel #centos
 
 
-git clone https://github.com/alibaba-damo-academy/FunASR.git && cd funasr/runtime/websocket
+git clone https://github.com/alibaba-damo-academy/FunASR.git && cd FunASR/funasr/runtime/websocket
 mkdir build && cd build
 cmake  -DCMAKE_BUILD_TYPE=release .. -DONNXRUNTIME_DIR=/path/to/onnxruntime-linux-x64-1.14.0
 make
@@ -51,41 +49,58 @@
 
 ```shell
 cd bin
-./funasr-wss-server  [--model_thread_num <int>] [--decoder_thread_num <int>]
-                    [--io_thread_num <int>] [--port <int>] [--listen_ip
+./funasr-wss-server [--download-model-dir <string>]
+                    [--model-thread-num <int>] [--decoder-thread-num <int>]
+                    [--io-thread-num <int>] [--port <int>] [--listen_ip
                     <string>] [--punc-quant <string>] [--punc-dir <string>]
                     [--vad-quant <string>] [--vad-dir <string>] [--quantize
                     <string>] --model-dir <string> [--keyfile <string>]
                     [--certfile <string>] [--] [--version] [-h]
 Where:
+   --download-model-dir <string>
+     Download model from Modelscope to download_model_dir
+
    --model-dir <string>
-     default: /workspace/models/asr, the asr model path, which contains model.onnx, config.yaml, am.mvn
+     default: /workspace/models/asr, the asr model path, which contains model_quant.onnx, config.yaml, am.mvn
    --quantize <string>
-     true (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir
+     true (Default), load the model of model_quant.onnx in model_dir. If set false, load the model of model.onnx in model_dir
 
    --vad-dir <string>
-     default: /workspace/models/vad, the vad model path, which contains model.onnx, vad.yaml, vad.mvn
+     default: /workspace/models/vad, the vad model path, which contains model_quant.onnx, vad.yaml, vad.mvn
    --vad-quant <string>
-     true (Default), load the model of model.onnx in vad_dir. If set true, load the model of model_quant.onnx in vad_dir
+     true (Default), load the model of model_quant.onnx in vad_dir. If set false, load the model of model.onnx in vad_dir
 
    --punc-dir <string>
-     default: /workspace/models/punc, the punc model path, which contains model.onnx, punc.yaml
+     default: /workspace/models/punc, the punc model path, which contains model_quant.onnx, punc.yaml
    --punc-quant <string>
-     true (Default), load the model of model.onnx in punc_dir. If set true, load the model of model_quant.onnx in punc_dir
+     true (Default), load the model of model_quant.onnx in punc_dir. If set false, load the model of model.onnx in punc_dir
 
-   --decoder_thread_num <int>
+   --decoder-thread-num <int>
      number of threads for decoder, default:8
-   --io_thread_num <int>
+   --io-thread-num <int>
      number of threads for network io, default:8
    --port <int>
-     listen port, default:8889
+     listen port, default:10095
    --certfile <string>
      default: ../../../ssl_key/server.crt, path of certficate for WSS connection. if it is empty, it will be in WS mode.
    --keyfile <string>
      default: ../../../ssl_key/server.key, path of keyfile for WSS connection
   
 example:
-./funasr-wss-server --model-dir /FunASR/funasr/runtime/onnxruntime/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+# you can use models downloaded from modelscope or local models:
+# download models from modelscope
+./funasr-wss-server  \
+  --download-model-dir /workspace/models \
+  --model-dir damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
+  --vad-dir damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
+  --punc-dir damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
+
+# load models from local paths
+./funasr-wss-server  \
+  --model-dir /workspace/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
+  --vad-dir /workspace/models/damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
+  --punc-dir /workspace/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
+
 ```
 
 ## Run websocket client test
@@ -116,7 +131,7 @@
      is-ssl is 1 means use wss connection, or use ws connection
 
 example:
-./funasr-wss-client --server-ip 127.0.0.1 --port 8889 --wav-path test.wav --thread-num 1 --is-ssl 1
+./funasr-wss-client --server-ip 127.0.0.1 --port 10095 --wav-path test.wav --thread-num 1 --is-ssl 1
 
 result json, example like:
 {"mode":"offline","text":"欢迎大家来体验达摩院推出的语音识别模型","wav_name":"wav2"}

--
Gitblit v1.9.1