funasr/auto/auto_model.py
@@ -141,7 +141,7 @@ kwargs = download_model(**kwargs) set_all_random_seed(kwargs.get("seed", 0)) device = kwargs.get("device", "cuda") if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0: device = "cpu" @@ -234,11 +234,9 @@ time1 = time.perf_counter() with torch.no_grad(): results, meta_data = model.inference(**batch, **kwargs) time2 = time.perf_counter() asr_result_list.extend(results) # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()