From 24b341a7eb0ad72e021470b8f2d1ee1d0b29ea81 Mon Sep 17 00:00:00 2001 From: 游雁 <zhifu.gzf@alibaba-inc.com> Date: 星期日, 23 四月 2023 19:57:10 +0800 Subject: [PATCH] client websocket --- funasr/runtime/python/websocket/README.md | 13 +++++++++---- 1 files changed, 9 insertions(+), 4 deletions(-) diff --git a/funasr/runtime/python/websocket/README.md b/funasr/runtime/python/websocket/README.md index 2c0dec1..ba7230a 100644 --- a/funasr/runtime/python/websocket/README.md +++ b/funasr/runtime/python/websocket/README.md @@ -2,16 +2,16 @@ We can send streaming audio data to server in real-time with grpc client every 300 ms e.g., and get transcribed text when stop speaking. The audio data is in streaming, the asr inference process is in offline. -# Steps ## For the Server Install the modelscope and funasr ```shell -pip install "modelscope[audio_asr]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +pip install -U modelscope funasr +# For users in China, you can install with the command: +# pip install -U modelscope funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple git clone https://github.com/alibaba/FunASR.git && cd FunASR -pip install --editable ./ ``` Install the requirements for server @@ -26,6 +26,11 @@ ```shell python ASR_server.py --host "0.0.0.0" --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" ``` +For the paraformer 2pass model + +```shell +python ASR_server_2pass.py --host "0.0.0.0" --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" +``` ## For the client @@ -39,7 +44,7 @@ Start client ```shell -python ASR_client.py --host "127.0.0.1" --port 10095 --chunk_size 300 +python ASR_client.py --host "127.0.0.1" --port 10095 --chunk_size 50 ``` ## Acknowledge -- Gitblit v1.9.1