From e9d2cfc3a134b00f4e98271fbee3838d1ccecbcc Mon Sep 17 00:00:00 2001
From: VirtuosoQ <2416050435@qq.com>
Date: Fri, 26 Apr 2024 14:59:30 +0800
Subject: [PATCH] FunASR java http client

---
 examples/industrial_data_pretraining/paraformer_streaming/demo.py |   18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/examples/industrial_data_pretraining/paraformer_streaming/demo.py b/examples/industrial_data_pretraining/paraformer_streaming/demo.py
index 601a531..57356b8 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/demo.py
+++ b/examples/industrial_data_pretraining/paraformer_streaming/demo.py
@@ -3,13 +3,16 @@
 # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
 #  MIT License  (https://opensource.org/licenses/MIT)
 
+import os
+
 from funasr import AutoModel
 
-chunk_size = [5, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
-encoder_chunk_look_back = 0 #number of chunks to lookback for encoder self-attention
-decoder_chunk_look_back = 0 #number of encoder chunks to lookback for decoder cross-attention
-wav_file="/Users/zhifu/Downloads/NCYzUhAtZNI_0015.wav"
-model = AutoModel(model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online", model_revision="v2.0.4")
+chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
+encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
+decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
+model = AutoModel(model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online")
+
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
 res = model.generate(input=wav_file,
             chunk_size=chunk_size,
             encoder_chunk_look_back=encoder_chunk_look_back,
@@ -17,12 +20,11 @@
             )
 print(res)
 
-# exit()
 
 import soundfile
-import os
 
-# wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
 speech, sample_rate = soundfile.read(wav_file)
 
 chunk_stride = chunk_size[1] * 960 # 600ms、480ms

--
Gitblit v1.9.1