| | |
| | | |
| | | if __name__ == '__main__': |
| | | output_dir = None |
| | | inference_pipeline = pipeline( |
| | | task=Tasks.voice_activity_detection, |
| | | model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch", |
| | | model_revision='v1.2.0', |
| | |
| | | else: |
| | | is_final = False |
| | | param_dict['is_final'] = is_final |
| | | segments_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step], |
| | | param_dict=param_dict) |
| | | print(segments_result) |
| | | |