| | |
| | |
|
| | | ```shell
|
| | | # pip3 install torch torchaudio
|
| | | pip install -U modelscope funasr
|
| | | pip3 install -U modelscope funasr
|
| | | # For the users in China, you could install with the command:
|
| | | # pip install -U modelscope funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://mirror.sjtu.edu.cn/pypi/web/simple
|
| | | # pip3 install -U modelscope funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://mirror.sjtu.edu.cn/pypi/web/simple
|
| | | ```
|
| | |
|
| | | ### Export [onnx model](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/export)
|
| | |
|
| | | ```shell
|
| | | python -m funasr.export.export_model --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch --export-dir ./export --type onnx --quantize True
|
| | | python -m funasr.export.export_model \
|
| | | --export-dir ./export \
|
| | | --type onnx \
|
| | | --quantize True \
|
| | | --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
|
| | | --model-name damo/speech_fsmn_vad_zh-cn-16k-common-pytorch \
|
| | | --model-name damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch
|
| | | ```
|
| | |
|
| | | ## Building for Linux/Unix
|
| | |
| | | # yum install openssl-devel #centos
|
| | |
|
| | |
|
| | | git clone https://github.com/alibaba-damo-academy/FunASR.git && cd FunASR/funasr/runtime/websocket
|
| | | git clone https://github.com/alibaba-damo-academy/FunASR.git && cd FunASR/funasr/runtime/websocket
|
| | | mkdir build && cd build
|
| | | cmake -DCMAKE_BUILD_TYPE=release .. -DONNXRUNTIME_DIR=/path/to/onnxruntime-linux-x64-1.14.0
|
| | | make
|
| | |
| | |
|
| | | ```shell
|
| | | cd bin
|
| | | ./funasr-wss-server [--model-thread-num <int>] [--decoder-thread-num <int>]
|
| | | ./funasr-wss-server [--download-model-dir <string>]
|
| | | [--model-thread-num <int>] [--decoder-thread-num <int>]
|
| | | [--io-thread-num <int>] [--port <int>] [--listen_ip
|
| | | <string>] [--punc-quant <string>] [--punc-dir <string>]
|
| | | [--vad-quant <string>] [--vad-dir <string>] [--quantize
|
| | | <string>] --model-dir <string> [--keyfile <string>]
|
| | | [--certfile <string>] [--] [--version] [-h]
|
| | | Where:
|
| | | --download-model-dir <string>
|
| | | Download the models from ModelScope into download_model_dir
|
| | |
|
| | | --model-dir <string>
|
| | | default: /workspace/models/asr, the asr model path, which contains model_quant.onnx, config.yaml, am.mvn
|
| | | default: /workspace/models/asr, the asr model path, which contains model_quant.onnx, config.yaml, am.mvn
|
| | | --quantize <string>
|
| | | true (Default), load the model of model_quant.onnx in model_dir. If set false, load the model of model.onnx in model_dir
|
| | | true (Default), load the model of model_quant.onnx in model_dir. If set false, load the model of model.onnx in model_dir
|
| | |
|
| | | --vad-dir <string>
|
| | | default: /workspace/models/vad, the vad model path, which contains model_quant.onnx, vad.yaml, vad.mvn
|
| | | default: /workspace/models/vad, the vad model path, which contains model_quant.onnx, vad.yaml, vad.mvn
|
| | | --vad-quant <string>
|
| | | true (Default), load the model of model_quant.onnx in vad_dir. If set false, load the model of model.onnx in vad_dir
|
| | | true (Default), load the model of model_quant.onnx in vad_dir. If set false, load the model of model.onnx in vad_dir
|
| | |
|
| | | --punc-dir <string>
|
| | | default: /workspace/models/punc, the punc model path, which contains model_quant.onnx, punc.yaml
|
| | | default: /workspace/models/punc, the punc model path, which contains model_quant.onnx, punc.yaml
|
| | | --punc-quant <string>
|
| | | true (Default), load the model of model_quant.onnx in punc_dir. If set false, load the model of model.onnx in punc_dir
|
| | | true (Default), load the model of model_quant.onnx in punc_dir. If set false, load the model of model.onnx in punc_dir
|
| | |
|
| | | --decoder-thread-num <int>
|
| | | number of threads for decoder, default:8
|
| | |
| | | --keyfile <string>
| | | default: ../../../ssl_key/server.key, path of keyfile for WSS connection
|
| | |
|
| | | example:
|
| | | ./funasr-wss-server --model-dir /FunASR/funasr/runtime/onnxruntime/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
|
| | | # you can use models downloaded from modelscope or local models:
|
| | | # download models from modelscope
|
| | | ./funasr-wss-server \
|
| | | --download-model-dir /workspace/models \
|
| | | --model-dir damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
|
| | | --vad-dir damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
|
| | | --punc-dir damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
|
| | |
|
| | | # load models from local paths
|
| | | ./funasr-wss-server \
|
| | | --model-dir /workspace/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-onnx \
|
| | | --vad-dir /workspace/models/damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
|
| | | --punc-dir /workspace/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx
|
| | |
|
| | | ```
|
| | |
|
| | | ## Run websocket client test
|