zhifu gao
2024-03-15 675b4605e8d1d9a406f5e6fc3bc989ddc932b04b
examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py
@@ -6,11 +6,12 @@
from funasr import AutoModel
wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"
chunk_size = 60000 # ms
model = AutoModel(model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="v2.0.2")
model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="v2.0.4")
res = model.generate(input=wav_file, chunk_size=chunk_size, )
res = model.generate(input=wav_file)
print(res)
# offline output: [[beg1, end1], [beg2, end2], .., [begN, endN]]
# each [beg, end] pair is one detected speech segment; beg/end: ms
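# A minimal sketch of consuming the offline result above, assuming res keeps the
# [{"key": ..., "value": [[beg, end], ...]}] shape used later in this demo, and
# reading the copy of vad_example.wav bundled with the downloaded model
# (soundfile cannot read the URL input directly):
import os
import soundfile

local_wav = os.path.join(model.model_path, "example/vad_example.wav")
audio, sr = soundfile.read(local_wav)
for beg_ms, end_ms in res[0]["value"]:
    # convert ms timestamps to sample indices and cut out each speech segment
    segment = audio[int(beg_ms * sr / 1000): int(end_ms * sr / 1000)]
    soundfile.write(f"segment_{beg_ms}_{end_ms}.wav", segment, sr)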
@@ -20,6 +21,7 @@
wav_file = os.path.join(model.model_path, "example/vad_example.wav")
speech, sample_rate = soundfile.read(wav_file)
chunk_size = 200 # ms
chunk_stride = int(chunk_size * sample_rate / 1000)  # samples per 200 ms chunk
cache = {}  # model state carried across streaming calls
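# Hedged sketch of the chunk loop this hunk omits: slice the waveform into
# chunk_stride-sample pieces and feed them to the model one at a time; the
# model.generate(...) call that consumes these values is shown in the next hunk.
# total_chunk_num and speech_chunk are illustrative names, not taken from the file.
import math

total_chunk_num = math.ceil(len(speech) / chunk_stride)
for i in range(total_chunk_num):
    speech_chunk = speech[i * chunk_stride: (i + 1) * chunk_stride]
    is_final = i == total_chunk_num - 1  # tell the model to flush its state on the last chunk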
@@ -32,6 +34,14 @@
                cache=cache,
                is_final=is_final,
                chunk_size=chunk_size,
                disable_pbar=True,
                )
    # print(res)
    if len(res[0]["value"]):  # print only the chunks that produced a VAD event
        print(res)
# the streaming output for each chunk takes one of three shapes:
# 1. [[beg1, end1], [beg2, end2], .., [begN, endN]] (e.g. [[beg, end]] or [[beg1, end1], [beg2, end2]]): one or more complete speech segments detected within the chunk
# 2. [[beg, -1]]: a speech segment has started; its end is not detected yet
# 3. [[-1, end]]: the end of a segment whose start was reported in an earlier chunk
# beg/end: ms
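# Hedged sketch of merging the per-chunk outputs above into complete segments:
# a [beg, -1] opens a segment, a later [-1, end] closes it, and full [beg, end]
# pairs are already complete. merge_streaming_segments is an illustrative helper,
# not part of the demo.
def merge_streaming_segments(chunk_values):
    """chunk_values: the res[0]["value"] list collected from each chunk, in order."""
    segments, pending_beg = [], None
    for value in chunk_values:
        for beg, end in value:
            if beg != -1 and end != -1:   # case 1: complete segment within one chunk
                segments.append([beg, end])
            elif end == -1:               # case 2: segment start detected, end still open
                pending_beg = beg
            else:                         # case 3: close the segment opened earlier
                segments.append([pending_beg, end])
                pending_beg = None
    return segments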