VirtuosoQ
2024-04-26 e9d2cfc3a134b00f4e98271fbee3838d1ccecbcc
examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py
@@ -6,13 +6,12 @@
 from funasr import AutoModel
 wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"
-chunk_size = 60000 # ms
-model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_fsmn_vad_zh-cn-16k-common-streaming", model_revision="v2.0.0")
+model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch")
-res = model(input=wav_file,
-            chunk_size=chunk_size,
-            )
+res = model.generate(input=wav_file)
 print(res)
 # [[beg1, end1], [beg2, end2], .., [begN, endN]]
 # beg/end: ms
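A minimal sketch of consuming the offline result printed above, assuming the offline call returns the same list-of-dicts shape that the streaming loop below indexes as res[0]["value"]:

segments = res[0]["value"]  # [[beg1, end1], ..., [begN, endN]], in ms
for beg, end in segments:
    # each pair marks one detected speech region
    print(f"speech {beg}-{end} ms ({(end - beg) / 1000:.2f} s)")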
@@ -22,16 +21,27 @@
 wav_file = os.path.join(model.model_path, "example/vad_example.wav")
 speech, sample_rate = soundfile.read(wav_file)
-chunk_stride = int(chunk_size * 16000 / 1000)
+chunk_size = 200 # ms
+chunk_stride = int(chunk_size * sample_rate / 1000)
 cache = {}
-for i in range(int(len((speech)-1)/chunk_stride+1)):
+total_chunk_num = int(len((speech)-1)/chunk_stride+1)
+for i in range(total_chunk_num):
     speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
-    is_final = i == int(len((speech)-1)/chunk_stride+1) - 1
-    res = model(input=speech_chunk,
+    is_final = i == total_chunk_num - 1
+    res = model.generate(input=speech_chunk,
                 cache=cache,
                 is_final=is_final,
                 chunk_size=chunk_size,
+                disable_pbar=True,
                 )
-    print(res)
+    # print(res)
+    if len(res[0]["value"]):
+        print(res)
+# 1. [[beg1, end1], [beg2, end2], .., [begN, endN]]; [[beg, end]]; [[beg1, end1], [beg2, end2]]
+# 2. [[beg, -1]]
+# 3. [[-1, end]]
+# beg/end: ms
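The three result shapes above pair up across chunks: a [[beg, -1]] opens a segment whose end arrives in a later chunk as [[-1, end]], while segments contained in a single chunk come through as complete [beg, end] pairs. With chunk_size = 200 and the 16 kHz example file, chunk_stride works out to 3200 samples per chunk, so a segment usually spans many chunks. A minimal sketch of stitching the streamed values back into offline-style segments; accumulate and pending_beg are illustrative names, not part of the demo:

segments = []       # completed [beg, end] pairs, in ms
pending_beg = None  # start of a segment whose end has not streamed in yet

def accumulate(value):
    # value is one chunk's res[0]["value"], in any of the shapes 1-3 above
    global pending_beg
    for beg, end in value:
        if beg != -1 and end != -1:   # shape 1: segment complete within this chunk
            segments.append([beg, end])
        elif end == -1:               # shape 2: start detected, end still pending
            pending_beg = beg
        else:                         # shape 3: close the pending segment
            segments.append([pending_beg, end])
            pending_beg = None

Calling accumulate(res[0]["value"]) inside the loop above should leave segments matching the offline output of the first example.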