语帆
2024-02-28 0a4e01bd7d789504cc5986fa848e5822bef4dfc9
funasr/auto/auto_model.py
@@ -141,7 +141,7 @@
             kwargs = download_model(**kwargs)
 
         set_all_random_seed(kwargs.get("seed", 0))
         device = kwargs.get("device", "cuda")
         if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
             device = "cpu"
@@ -161,19 +161,18 @@
             vocab_size = len(tokenizer.token_list)
         else:
             vocab_size = -1
         # build frontend
         frontend = kwargs.get("frontend", None)
         if frontend is not None:
             frontend_class = tables.frontend_classes.get(frontend)
             frontend = frontend_class(**kwargs["frontend_conf"])
             kwargs["frontend"] = frontend
             kwargs["input_size"] = frontend.output_size()
         # build model
         model_class = tables.model_classes.get(kwargs["model"])
         model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
         model.to(device)
 
         # init_param
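
In this hunk the frontend and model classes are resolved by name through FunASR's tables registries, instantiated from their frontend_conf / model_conf dicts, and the frontend's output_size() feeds the model's input_size. A sketch of that name-to-class registry pattern under simplified assumptions (the frontend_classes dict, register_frontend decorator, and ExampleFrontend are illustrative stand-ins, not FunASR's actual tables module):

    # Illustrative stand-in for tables.frontend_classes.
    frontend_classes: dict = {}

    def register_frontend(name):
        def wrap(cls):
            frontend_classes[name] = cls
            return cls
        return wrap

    @register_frontend("wav_frontend")
    class ExampleFrontend:
        def __init__(self, fs=16000, n_mels=80, **kwargs):
            self.n_mels = n_mels

        def output_size(self):
            # Consumed by the caller to set kwargs["input_size"].
            return self.n_mels

    # Look up the class by name, then build it from its conf dict,
    # mirroring frontend_class(**kwargs["frontend_conf"]) in the diff.
    frontend = frontend_classes["wav_frontend"](fs=16000, n_mels=80)
    assert frontend.output_size() == 80
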
@@ -209,14 +208,12 @@
         kwargs.update(cfg)
         model = self.model if model is None else model
         model.eval()
-        pdb.set_trace()
         batch_size = kwargs.get("batch_size", 1)
         # if kwargs.get("device", "cpu") == "cpu":
         #     batch_size = 1
 
         key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)
-        pdb.set_trace()
         speed_stats = {}
         asr_result_list = []
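
The two traces removed above bracketed the batching setup, where prepare_data_iterator turns the raw input into parallel key and data lists so per-sample results stay attributable after batching. A sketch of that key/data contract (prepare_keys_and_data and its positional key scheme are illustrative stand-ins, not FunASR's prepare_data_iterator):

    def prepare_keys_and_data(input, key=None):
        # Illustrative stand-in: pair every sample with a stable key,
        # generating positional keys when the caller supplies none.
        if key is None:
            key = ["sample_{}".format(i) for i in range(len(input))]
        assert len(key) == len(input)
        return key, input

    key_list, data_list = prepare_keys_and_data(["a.wav", "b.wav"])
    # key_list == ["sample_0", "sample_1"]; zipped back to results after inference.
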
@@ -225,25 +222,21 @@
         pbar = tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not disable_pbar else None
         time_speech_total = 0.0
         time_escape_total = 0.0
-        pdb.set_trace()
         for beg_idx in range(0, num_samples, batch_size):
-            pdb.set_trace()
             end_idx = min(num_samples, beg_idx + batch_size)
             data_batch = data_list[beg_idx:end_idx]
             key_batch = key_list[beg_idx:end_idx]
             batch = {"data_in": data_batch, "key": key_batch}
-            pdb.set_trace()
             if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                 batch["data_in"] = data_batch[0]
                 batch["data_lengths"] = input_len
 
             time1 = time.perf_counter()
             with torch.no_grad():
-                pdb.set_trace()
                 results, meta_data = model.inference(**batch, **kwargs)
             time2 = time.perf_counter()
 
-            pdb.set_trace()
             asr_result_list.extend(results)
             # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
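
The last hunk strips the remaining traces from the inference loop itself, which slices the key and data lists into batch_size chunks, runs model.inference under torch.no_grad(), and times each call with time.perf_counter(). A stripped-down skeleton of that loop (run_batched is illustrative; FunASR's loop additionally handles single-sample fbank input, the progress bar, and speed statistics):

    import time
    import torch

    def run_batched(model, data_list, key_list, batch_size=1, **kwargs):
        results_all, time_escape_total = [], 0.0
        num_samples = len(data_list)
        for beg_idx in range(0, num_samples, batch_size):
            end_idx = min(num_samples, beg_idx + batch_size)
            batch = {
                "data_in": data_list[beg_idx:end_idx],
                "key": key_list[beg_idx:end_idx],
            }
            time1 = time.perf_counter()
            with torch.no_grad():  # inference only; skip autograd bookkeeping
                results, meta_data = model.inference(**batch, **kwargs)
            time_escape_total += time.perf_counter() - time1
            results_all.extend(results)
        return results_all, time_escape_total
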