| | |
| | | #### [FSMN-VAD-online model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) |
| | | ```python |
| | | inference_pipeline = pipeline(
| | | task=Tasks.voice_activity_detection,
| | | model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
| | | )
| | | import soundfile |
| | |
| | | speech_chunk = speech[0:chunk_stride] |
| | | rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict) |
| | | print(rec_result) |
| | | # next chunk (same stride as the first)
| | | speech_chunk = speech[chunk_stride:chunk_stride+chunk_stride] |
| | | rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict) |
| | | print(rec_result) |