From 937e507977cc9e49ce323f8b2933087d0fe52698 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Sun, 16 Apr 2023 22:29:32 +0800
Subject: [PATCH] Merge pull request #363 from alibaba-damo-academy/main

---
 funasr/runtime/python/websocket/README.md |    8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/funasr/runtime/python/websocket/README.md b/funasr/runtime/python/websocket/README.md
index 2c0dec1..d8e7bf1 100644
--- a/funasr/runtime/python/websocket/README.md
+++ b/funasr/runtime/python/websocket/README.md
@@ -2,7 +2,6 @@
 We can send streaming audio data to server in real-time with grpc client every 300 ms e.g., and get transcribed text when stop speaking.
 The audio data is in streaming, the asr inference process is in offline.
 
-# Steps
 
 ## For the Server
 
@@ -26,6 +25,11 @@
 ```shell
 python ASR_server.py --host "0.0.0.0" --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
 ```
+For the paraformer 2pass model
+
+```shell
+python ASR_server_2pass.py --host "0.0.0.0" --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+```
 
 ## For the client
 
@@ -39,7 +43,7 @@
 Start client
 
 ```shell
-python ASR_client.py --host "127.0.0.1" --port 10095 --chunk_size 300
+python ASR_client.py --host "127.0.0.1" --port 10095 --chunk_size 50
 ```
 
 ## Acknowledge

--
Gitblit v1.9.1