From adcee8828ef5d78b575043954deb662a35e318f7 Mon Sep 17 00:00:00 2001
From: huangmingming <huangmingming@deepscience.cn>
Date: Mon, 30 Jan 2023 16:02:54 +0800
Subject: [PATCH] update the minimum size of audio
---
funasr/runtime/python/grpc/grpc_server.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/funasr/runtime/python/grpc/grpc_server.py b/funasr/runtime/python/grpc/grpc_server.py
index f42cbb3..9165c1f 100644
--- a/funasr/runtime/python/grpc/grpc_server.py
+++ b/funasr/runtime/python/grpc/grpc_server.py
@@ -17,7 +17,7 @@
self.client_buffers = {}
self.client_transcription = {}
self.auth_user = user_allowed.split("|")
- self.inference_16k_pipline = pipeline(task=Tasks.auto_speech_recognition, model=model)
+ self.inference_16k_pipeline = pipeline(task=Tasks.auto_speech_recognition, model=model)
self.sample_rate = sample_rate
def clear_states(self, user):
@@ -81,7 +81,7 @@
result["detail"] = "decoding data: %d bytes" % len(tmp_data)
result["text"] = ""
yield Response(sentence=json.dumps(result), user=req.user, action="decoding", language=req.language)
- if len(tmp_data) < 800: #min input_len for asr model
+ if len(tmp_data) < 9600: # min input_len for asr model, 300 ms
end_time = int(round(time.time() * 1000))
delay_str = str(end_time - begin_time)
result = {}
@@ -92,7 +92,7 @@
print ("user: %s , delay(ms): %s, error: %s " % (req.user, delay_str, "data_is_not_long_enough"))
yield Response(sentence=json.dumps(result), user=req.user, action="finish", language=req.language)
else:
- asr_result = self.inference_16k_pipline(audio_in=tmp_data, audio_fs = self.sample_rate)
+ asr_result = self.inference_16k_pipeline(audio_in=tmp_data, audio_fs = self.sample_rate)
if "text" in asr_result:
asr_result = asr_result['text']
else:
--
Gitblit v1.9.1