From c542eacb0aadcbc49c63db40429fca4e08f807a4 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期五, 21 七月 2023 10:27:35 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR add

---
 funasr/runtime/websocket/readme.md |   76 +++++++++++++++++++++++++++-----------
 1 file changed, 54 insertions(+), 22 deletions(-)

diff --git a/funasr/runtime/websocket/readme.md b/funasr/runtime/websocket/readme.md
index 0cebe64..414d6b8 100644
--- a/funasr/runtime/websocket/readme.md
+++ b/funasr/runtime/websocket/readme.md
@@ -1,3 +1,5 @@
+([简体中文](https://github.com/alibaba-damo-academy/FunASR/blob/main/funasr/runtime/websocket/readme_zh.md)|English)
+
 # Service with websocket-cpp
 
 ## Export the model
@@ -5,15 +7,21 @@
 
 ```shell
 # pip3 install torch torchaudio
-pip install -U modelscope funasr
+pip3 install -U modelscope funasr
 # For the users in China, you could install with the command:
-# pip install -U modelscope funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://mirror.sjtu.edu.cn/pypi/web/simple
+# pip3 install -U modelscope funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://mirror.sjtu.edu.cn/pypi/web/simple
 ```
 
 ### Export [onnx model](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/export)
 
 ```shell
-python -m funasr.export.export_model --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch --export-dir ./export --type onnx --quantize True
+python -m funasr.export.export_model \
+--export-dir ./export \
+--type onnx \
+--quantize True \
+--model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
+--model-name damo/speech_fsmn_vad_zh-cn-16k-common-pytorch \
+--model-name damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch
 ```
 
 ## Building for Linux/Unix
@@ -26,6 +34,15 @@
 tar -zxvf onnxruntime-linux-x64-1.14.0.tgz
 ```
 
+### Download ffmpeg
+```shell
+wget https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2023-07-09-12-50/ffmpeg-N-111383-g20b8688092-linux64-gpl-shared.tar.xz
+tar -xvf ffmpeg-N-111383-g20b8688092-linux64-gpl-shared.tar.xz
+# 鍥藉唴鍙互浣跨敤涓嬭堪鏂瑰紡
+# wget https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/dep_libs/ffmpeg-N-111383-g20b8688092-linux64-gpl-shared.tar.xz
+# tar -xvf ffmpeg-N-111383-g20b8688092-linux64-gpl-shared.tar.xz
+```
+
 ### Install openblas
 ```shell
 sudo apt-get install libopenblas-dev #ubuntu
@@ -36,56 +53,71 @@
 required openssl lib
 
 ```shell
-#install openssl lib for ubuntu 
-apt-get install libssl-dev
-#install openssl lib for centos
-yum install openssl-devel
+apt-get install libssl-dev #ubuntu 
+# yum install openssl-devel #centos
 
 
-git clone https://github.com/alibaba-damo-academy/FunASR.git && cd funasr/runtime/websocket
+git clone https://github.com/alibaba-damo-academy/FunASR.git && cd FunASR/funasr/runtime/websocket
 mkdir build && cd build
-cmake  -DCMAKE_BUILD_TYPE=release .. -DONNXRUNTIME_DIR=/path/to/onnxruntime-linux-x64-1.14.0
+cmake  -DCMAKE_BUILD_TYPE=release .. -DONNXRUNTIME_DIR=/path/to/onnxruntime-linux-x64-1.14.0 -DFFMPEG_DIR=/path/to/ffmpeg-N-111383-g20b8688092-linux64-gpl-shared
 make
 ```
 ## Run the websocket server
 
 ```shell
 cd bin
-./funasr-wss-server  [--model_thread_num <int>] [--decoder_thread_num <int>]
-                    [--io_thread_num <int>] [--port <int>] [--listen_ip
+./funasr-wss-server [--download-model-dir <string>]
+                    [--model-thread-num <int>] [--decoder-thread-num <int>]
+                    [--io-thread-num <int>] [--port <int>] [--listen_ip
                     <string>] [--punc-quant <string>] [--punc-dir <string>]
                     [--vad-quant <string>] [--vad-dir <string>] [--quantize
                     <string>] --model-dir <string> [--keyfile <string>]
                     [--certfile <string>] [--] [--version] [-h]
 Where:
+   --download-model-dir <string>
+     Download model from Modelscope to download_model_dir
+
    --model-dir <string>
-     default: /workspace/models/asr, the asr model path, which contains model.onnx, config.yaml, am.mvn
+     default: /workspace/models/asr, the asr model path, which contains model_quant.onnx, config.yaml, am.mvn
    --quantize <string>
-     true (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir
+     true (Default), load the model of model_quant.onnx in model_dir. If set false, load the model of model.onnx in model_dir
 
    --vad-dir <string>
-     default: /workspace/models/vad, the vad model path, which contains model.onnx, vad.yaml, vad.mvn
+     default: /workspace/models/vad, the vad model path, which contains model_quant.onnx, vad.yaml, vad.mvn
    --vad-quant <string>
-     true (Default), load the model of model.onnx in vad_dir. If set true, load the model of model_quant.onnx in vad_dir
+     true (Default), load the model of model_quant.onnx in vad_dir. If set false, load the model of model.onnx in vad_dir
 
    --punc-dir <string>
-     default: /workspace/models/punc, the punc model path, which contains model.onnx, punc.yaml
+     default: /workspace/models/punc, the punc model path, which contains model_quant.onnx, punc.yaml
    --punc-quant <string>
-     true (Default), load the model of model.onnx in punc_dir. If set true, load the model of model_quant.onnx in punc_dir
+     true (Default), load the model of model_quant.onnx in punc_dir. If set false, load the model of model.onnx in punc_dir
 
-   --decoder_thread_num <int>
+   --decoder-thread-num <int>
      number of threads for decoder, default:8
-   --io_thread_num <int>
+   --io-thread-num <int>
      number of threads for network io, default:8
    --port <int>
-     listen port, default:8889
+     listen port, default:10095
    --certfile <string>
      default: ../../../ssl_key/server.crt, path of certficate for WSS connection. if it is empty, it will be in WS mode.
    --keyfile <string>
      default: ../../../ssl_key/server.key, path of keyfile for WSS connection
   
 example:
-./funasr-wss-server --model-dir /FunASR/funasr/runtime/onnxruntime/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+# you can use models downloaded from modelscope or local models:
+# download models from modelscope
+./funasr-wss-server  \
+  --download-model-dir /workspace/models \
+  --model-dir damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
+  --vad-dir damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
+  --punc-dir damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
+
+# load models from local paths
+./funasr-wss-server  \
+  --model-dir /workspace/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
+  --vad-dir /workspace/models/damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
+  --punc-dir /workspace/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
+
 ```
 
 ## Run websocket client test
@@ -116,7 +148,7 @@
      is-ssl is 1 means use wss connection, or use ws connection
 
 example:
-./funasr-wss-client --server-ip 127.0.0.1 --port 8889 --wav-path test.wav --thread-num 1 --is-ssl 1
+./funasr-wss-client --server-ip 127.0.0.1 --port 10095 --wav-path test.wav --thread-num 1 --is-ssl 1
 
 result json, example like:
 {"mode":"offline","text":"欢迎大家来体验达摩院推出的语音识别模型","wav_name":"wav2"}

--
Gitblit v1.9.1