From 787b9d8e7e0107f6cd74a71b3d29494617960ccf Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期二, 06 六月 2023 22:09:42 +0800
Subject: [PATCH] model license
---
funasr/runtime/python/websocket/wss_srv_asr.py | 14 +++++++-------
docs/model_zoo/modelscope_models.md | 2 +-
MODEL_LICENSE | 6 +++---
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/MODEL_LICENSE b/MODEL_LICENSE
index e505980..3d9e410 100644
--- a/MODEL_LICENSE
+++ b/MODEL_LICENSE
@@ -9,7 +9,7 @@
涓轰簡淇濊瘉鏇村ソ鐨勭ぞ鍖哄悎浣滐紝鎴戜滑鍒跺畾浜嗕互涓嬪崗璁紝甯屾湜鎮ㄤ粩缁嗛槄璇诲苟閬靛畧鏈崗璁��
1 瀹氫箟
-鏈崗璁腑锛孾FunASR 杞欢]鎸� FunASR 寮�婧愭ā鍨嬪強鍏惰鐢熷搧锛屽寘鎷� Finetune 鍚庣殑妯″瀷锛沎鎮╙鎸囦娇鐢ㄣ�佷慨鏀广�佸垎浜拰瀛︿範[FunASR 杞欢]鐨勪釜浜烘垨缁勭粐銆�
+鏈崗璁腑锛孾FunASR 杞欢]鎸� FunASR 寮�婧愭ā鍨嬫潈閲嶅強鍏惰鐢熷搧锛屽寘鎷� Finetune 鍚庣殑妯″瀷锛沎鎮╙鎸囦娇鐢ㄣ�佷慨鏀广�佸垎浜拰瀛︿範[FunASR 杞欢]鐨勪釜浜烘垨缁勭粐銆�
2 璁稿彲鍜岄檺鍒�
2.1 璁稿彲
@@ -18,7 +18,7 @@
2.2 闄愬埗
-鎮ㄥ湪浣跨敤銆佸鍒躲�佷慨鏀瑰拰鍒嗕韩[FunASR 杞欢]鏃讹紝蹇呴』娉ㄦ槑鍑哄浠ュ強浣滆�呬俊鎭�傚苟涓旓紝灏哰FunASR 杞欢]涓婁紶鑷冲叾浠栫涓夋柟骞冲彴浠ヤ緵涓嬭浇锛岄渶瑕佽幏寰楅澶栬鍙紝鍙�氳繃瀹樻柟閭欢锛坒unasr@list.alibaba-inc.com锛夎繘琛岀敵璇凤紙鍏嶈垂锛夈��
+鎮ㄥ湪浣跨敤銆佸鍒躲�佷慨鏀瑰拰鍒嗕韩[FunASR 杞欢]鏃讹紝蹇呴』娉ㄦ槑鍑哄浠ュ強浣滆�呬俊鎭紝骞朵繚鐣橻FunASR 杞欢]涓浉鍏虫ā鍨嬪悕绉般��
3 璐d换鍜岄闄╂壙鎷�
[FunASR 杞欢]浠呬綔涓哄弬鑰冨拰瀛︿範浣跨敤锛屼笉瀵规偍浣跨敤鎴栦慨鏀筟FunASR 杞欢]閫犳垚鐨勪换浣曠洿鎺ユ垨闂存帴鎹熷け鎵挎媴浠讳綍璐d换銆傛偍瀵筟FunASR 杞欢]鐨勪娇鐢ㄥ拰淇敼搴旇鑷鎵挎媴椋庨櫓銆�
@@ -54,7 +54,7 @@
You are free to use, copy, modify, and share [FunASR software] under the conditions of this agreement.
2.2 Restrictions
-You should indicate the code and model source and author information when using, copying, modifying and sharing [FunASR software]. To upload the [FunASR software] to other third-party platforms for download, an additional license is required, which can be applied for free by sending an email to the official email address (funasr@list.alibaba-inc.com).
+You should indicate the code and model source and author information when using, copying, modifying and sharing [FunASR software]. You should retain the relevant model names used in [FunASR software].
3 Responsibility and Risk
[FunASR software] is for reference and learning purposes only and is not responsible for any direct or indirect losses caused by your use or modification of [FunASR software]. You should take responsibility and risks for your use and modification of [FunASR software].
diff --git a/docs/model_zoo/modelscope_models.md b/docs/model_zoo/modelscope_models.md
index cdbe3ff..9f67fb9 100644
--- a/docs/model_zoo/modelscope_models.md
+++ b/docs/model_zoo/modelscope_models.md
@@ -1,7 +1,7 @@
# Pretrained Models on ModelScope
## Model License
-You are free to use, copy, modify, and share FunASR models under the conditions of this agreement. You should indicate the model source and author information when using, copying, modifying and sharing FunASR models. To upload the FunASR models to other third-party platforms for download, an additional license is required, which can be applied for free by sending an email to the official email address (funasr@list.alibaba-inc.com). Full model license could see [license](https://github.com/alibaba-damo-academy/FunASR/blob/main/MODEL_LICENSE)
+You are free to use, copy, modify, and share FunASR models under the conditions of this agreement. You should indicate the model source and author information when using, copying, modifying and sharing FunASR models. You should keep the relevant names of models in [FunASR software]. The full model license can be found at [license](https://github.com/alibaba-damo-academy/FunASR/blob/main/MODEL_LICENSE)
## Model Zoo
diff --git a/funasr/runtime/python/websocket/wss_srv_asr.py b/funasr/runtime/python/websocket/wss_srv_asr.py
index 948619b..3810cd6 100644
--- a/funasr/runtime/python/websocket/wss_srv_asr.py
+++ b/funasr/runtime/python/websocket/wss_srv_asr.py
@@ -95,7 +95,7 @@
websocket.param_dict_punc = {'cache': list()}
websocket.vad_pre_idx = 0
speech_start = False
- speech_end_i = False
+ speech_end_i = -1
websocket.wav_name = "microphone"
websocket.mode = "2pass"
print("new user connected", flush=True)
@@ -124,7 +124,7 @@
# asr online
frames_asr_online.append(message)
- websocket.param_dict_asr_online["is_final"] = speech_end_i
+ websocket.param_dict_asr_online["is_final"] = speech_end_i != -1
if len(frames_asr_online) % websocket.chunk_interval == 0 or websocket.param_dict_asr_online["is_final"]:
if websocket.mode == "2pass" or websocket.mode == "online":
audio_in = b"".join(frames_asr_online)
@@ -134,14 +134,14 @@
frames_asr.append(message)
# vad online
speech_start_i, speech_end_i = await async_vad(websocket, message)
- if speech_start_i:
+ if speech_start_i != -1:
speech_start = True
beg_bias = (websocket.vad_pre_idx-speech_start_i)//duration_ms
frames_pre = frames[-beg_bias:]
frames_asr = []
frames_asr.extend(frames_pre)
# asr punc offline
- if speech_end_i or not websocket.is_speaking:
+ if speech_end_i != -1 or not websocket.is_speaking:
# print("vad end point")
if websocket.mode == "2pass" or websocket.mode == "offline":
audio_in = b"".join(frames_asr)
@@ -172,15 +172,15 @@
segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
- speech_start = False
- speech_end = False
+ speech_start = -1
+ speech_end = -1
if len(segments_result) == 0 or len(segments_result["text"]) > 1:
return speech_start, speech_end
if segments_result["text"][0][0] != -1:
speech_start = segments_result["text"][0][0]
if segments_result["text"][0][1] != -1:
- speech_end = True
+ speech_end = segments_result["text"][0][1]
return speech_start, speech_end
--
Gitblit v1.9.1