from funasr import AutoModel

# Model and VAD names here are assumed from the FunASR README example;
# the fsmn-vad front end splits long audio into segments of at most 30 s.
model = AutoModel(
    model="Whisper-large-v3",
    vad_model="fsmn-vad",
    vad_kwargs={"max_single_segment_time": 30000},
)

# Whisper decoding options, forwarded by FunASR to whisper.DecodingOptions.
DecodingOptions = {
    "task": "transcribe",
    "language": None,           # None => auto-detect the language
    "beam_size": None,          # None => greedy decoding
    "fp16": True,
    "without_timestamps": False,
    "prompt": None,
}
res = model.generate(
    input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
    language=None,
    task="transcribe",
    DecodingOptions=DecodingOptions,
    batch_size_s=0,
)
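If the call succeeds, res holds one entry per input; in FunASR's usual output convention each entry is a dict carrying a "text" field (an assumption worth verifying against your installed version):

# Print the transcript of the single input above; the "text" key
# follows FunASR's common result layout (assumed here).
print(res[0]["text"])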

# Move the features to the target device and drop the batch dimension.
speech = speech.to(device=kwargs["device"])[0, :, :]
speech_lengths = speech_lengths.to(device=kwargs["device"])

# detect the spoken language
_, probs = self.model.detect_language(speech)
print(f"Detected language: {max(probs, key=probs.get)}")
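# Optional, illustrative only: probs maps language codes to probabilities,
# so the top candidates can be ranked directly.
top3 = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)[:3]
print(f"Top language candidates: {top3}")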

# decode the audio; options supplied via kwargs take precedence, with the
# top-level "language" kwarg as a fallback
decoding_kwargs = dict(kwargs.get("DecodingOptions", {}))
decoding_kwargs.setdefault("language", kwargs.get("language", None))
options = whisper.DecodingOptions(**decoding_kwargs)
result = whisper.decode(self.model, speech, options)

results = []
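The excerpt ends as the result list is initialized; a plausible continuation, assuming FunASR's usual result-dict shape, would package the decoded text like so:

# Hypothetical continuation: wrap the decoded text in a result dict
# (the "text" key is an assumption, not confirmed by the source).
results.append({"text": result.text})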