# NOTE(review): this chunk is the interior of a larger inference routine;
# names such as `data_in`, `download_from_url`, `kwargs`, `input`, `key`,
# `input_len`, `prepare_data_iterator`, `model`, `batch`, and
# `asr_result_list` are defined by the enclosing function, outside this view.
# The original text was wrapped in markdown-table pipes (`| | | … |`),
# which is not valid Python; formatting restored here.

# Character pool (letters + digits), presumably for generating random
# keys/ids downstream — TODO confirm against the rest of the function.
chars = string.ascii_letters + string.digits

# A string starting with "http" is treated as a URL and downloaded to a
# local file before further processing.
if isinstance(data_in, str) and data_in.startswith('http'):  # url
    data_in = download_from_url(data_in)

# A local path may be a single wav file or a filelist
# (wav.scp, file.jsonl, text.txt); only the extension is inspected here.
if isinstance(data_in, str) and os.path.exists(data_in):  # wav_path; filelist: wav.scp, file.jsonl;text.txt;
    _, file_extension = os.path.splitext(data_in)
    file_extension = file_extension.lower()

# Batch size defaults to 1 when the caller does not specify one.
batch_size = kwargs.get("batch_size", 1)

# NOTE(review): `input` is passed here (not the `data_in` rewritten by the
# URL download above) — presumably the enclosing function's parameter;
# verify this is intentional.
key_list, data_list = prepare_data_iterator(
    input,
    input_len=input_len,
    data_type=kwargs.get("data_type", None),
    key=key,
)

# Per-stage timing accumulator (populated elsewhere in the routine).
speed_stats = {}

time1 = time.perf_counter()
with torch.no_grad():
    # Fixed: removed two leftover `pdb.set_trace()` debugger breakpoints
    # that would block any non-interactive run at this point.
    results, meta_data = model.inference(**batch, **kwargs)
time2 = time.perf_counter()

asr_result_list.extend(results)