else:
    # extract fbank feats
    # (excerpt; assumes module-level imports of os, time, torch, torchaudio and
    #  FunASR-style load_audio_text_image_video / download_from_url helpers)
    time1 = time.perf_counter()
    sample_list = load_audio_text_image_video(
        data_in,
        fs=frontend.fs,
        audio_fs=kwargs.get("fs", 16000),
        data_type=kwargs.get("data_type", "sound"),
        tokenizer=tokenizer,
    )
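
# NOTE (assumption): self.encode() below consumes `speech`/`speech_lengths`,
# which this excerpt never defines; in FunASR-style pipelines they come from a
# frontend feature-extraction step over `sample_list`, e.g.:
#   speech, speech_lengths = extract_fbank(
#       sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
#   )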

encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
if isinstance(encoder_out, tuple):
    encoder_out = encoder_out[0]

# Shift every non-zero OCR token id up by one so that 0 stays reserved
# (presumably as the padding/blank id), e.g. [[0, 5, 7]] -> [[0, 6, 8]]
ocr_list_new = [[x + 1 if x != 0 else x for x in sublist] for sublist in ocr_sample_list]
ocr = torch.tensor(ocr_list_new)
ocr_lengths = ocr.new_full([1], dtype=torch.long, fill_value=ocr.size(1))
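# NOTE: new_full([1], ...) hard-codes a batch of one OCR sequence; ocr.size(1)
# is its (padded) length.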
# The text encoder returns the encoded OCR states, their lengths, and an
# unused third value.
ocr, ocr_lens, _ = self.text_encoder(ocr, ocr_lengths)

# c. Pass the encoder result to the beam search
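# (assumption) The actual decoding step is elided in this excerpt; in
# ESPnet/FunASR-style attention decoders it is typically a per-utterance call
# into a prebuilt beam search object, along the lines of:
#   for i in range(encoder_out.size(0)):
#       nbest_hyps = self.beam_search(
#           x=encoder_out[i],
#           maxlenratio=kwargs.get("maxlenratio", 0.0),
#           minlenratio=kwargs.get("minlenratio", 0.0),
#       )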

# Recurse when given a list/tuple of inputs (inside load_audio_text_image_video)
return [load_audio_text_image_video(audio, fs=fs, audio_fs=audio_fs, data_type=data_type, **kwargs)
        for audio in data_or_path_or_list]
if isinstance(data_or_path_or_list, str) and data_or_path_or_list.startswith("http"):  # download URL to a local file
    data_or_path_or_list = download_from_url(data_or_path_or_list)

if isinstance(data_or_path_or_list, str) and os.path.exists(data_or_path_or_list):  # local file
    if data_type is None or data_type == "sound":
        data_or_path_or_list, audio_fs = torchaudio.load(data_or_path_or_list)
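        # torchaudio.load returns a (waveform, sample_rate) pair, so `audio_fs`
        # is overwritten with the file's native rate. A caller needing `fs`
        # would typically resample afterwards, e.g. (sketch):
        #   if audio_fs != fs:
        #       resampler = torchaudio.transforms.Resample(audio_fs, fs)
        #       data_or_path_or_list = resampler(data_or_path_or_list)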