examples/common_voice/whisper_lid/demo_funasr.py
@@ -16,4 +16,4 @@ for wav_id in multilingual_wavs:
    wav_file = f"{model.model_path}/examples/{wav_id}"
    res = model.generate(input=wav_file, data_type="sound", inference_clip_length=250)
    print("detect sample {}: {}".format(wav_id, res))
    print("detect sample {}: {}".format(wav_id, res))