msgk
2025-02-14 a8591060d3889cd7a72841fa32a7ee64b49db1d2
funasr/auto/auto_model.py
@@ -147,13 +147,16 @@
         # if spk_model is not None, build spk model else None
         spk_model = kwargs.get("spk_model", None)
         spk_kwargs = {} if kwargs.get("spk_kwargs", {}) is None else kwargs.get("spk_kwargs", {})
+        cb_kwargs = (
+            {} if spk_kwargs.get("cb_kwargs", {}) is None else spk_kwargs.get("cb_kwargs", {})
+        )
         if spk_model is not None:
             logging.info("Building SPK model.")
             spk_kwargs["model"] = spk_model
             spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", "master")
             spk_kwargs["device"] = kwargs["device"]
             spk_model, spk_kwargs = self.build_model(**spk_kwargs)
-            self.cb_model = ClusterBackend().to(kwargs["device"])
+            self.cb_model = ClusterBackend(**cb_kwargs).to(kwargs["device"])
             spk_mode = kwargs.get("spk_mode", "punc_segment")
             if spk_mode not in ["default", "vad_segment", "punc_segment"]:
                 logging.error("spk_mode should be one of default, vad_segment and punc_segment.")
@@ -199,6 +202,7 @@
             tokenizers_build = []
             vocab_sizes = []
             token_lists = []
+            ### === only for kws ===
             token_list_files = kwargs.get("token_lists", [])
             seg_dicts = kwargs.get("seg_dicts", [])
@@ -213,9 +217,9 @@
                 ### === only for kws ===
                 if len(token_list_files) > 1:
-                    tokenizer_conf.token_list = token_list_files[i]
+                    tokenizer_conf["token_list"] = token_list_files[i]
                 if len(seg_dicts) > 1:
-                    tokenizer_conf.seg_dict = seg_dicts[i]
+                    tokenizer_conf["seg_dict"] = seg_dicts[i]
                 ### === only for kws ===
                 tokenizer = tokenizer_class(**tokenizer_conf)
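
Why the dict-style assignment: tokenizer_conf is later splatted as tokenizer_class(**tokenizer_conf), which suggests a plain dict, and attribute assignment on a plain dict raises AttributeError. Minimal repro of the failure mode, assuming a plain dict:

    tokenizer_conf = {"token_list": None, "seg_dict": None}
    try:
        tokenizer_conf.token_list = "tokens_0.txt"   # AttributeError on a plain dict
    except AttributeError as e:
        print(e)                                     # 'dict' object has no attribute 'token_list'
    tokenizer_conf["token_list"] = "tokens_0.txt"    # key assignment works
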
@@ -228,8 +232,8 @@
                 if token_list is not None:
                     vocab_size = len(token_list)
-                    if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
-                        vocab_size = tokenizer.get_vocab_size()
+                if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
+                    vocab_size = tokenizer.get_vocab_size()
                 token_lists.append(token_list)
                 vocab_sizes.append(vocab_size)
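
The dedent is the fix: inside the old branch, vocab_size had just been set to len(token_list), which is never -1, so the get_vocab_size() fallback was dead code; one level up it now also covers tokenizers that ship no explicit token list. Sketch of the corrected flow (helper name is illustrative):

    def resolve_vocab_size(tokenizer, token_list):
        vocab_size = -1
        if token_list is not None:
            vocab_size = len(token_list)  # always >= 0, so a nested -1 check could never fire
        # Fallback also runs when token_list is None (e.g. a sentencepiece-style tokenizer).
        if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
            vocab_size = tokenizer.get_vocab_size()
        return vocab_size
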
@@ -364,7 +368,11 @@
         if pbar:
             # pbar.update(1)
             pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
-        torch.cuda.empty_cache()
+        device = next(model.parameters()).device
+        if device.type == "cuda":
+            with torch.cuda.device(device):
+                torch.cuda.empty_cache()
         return asr_result_list
 
     def inference_with_vad(self, input, input_len=None, **cfg):
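
The guard skips the allocator call entirely for CPU-only inference and names the target GPU explicitly instead of relying on the current-device default. A standalone sketch of the same pattern (helper name is illustrative; assumes the model has parameters, as the patch does):

    import torch

    def release_cuda_cache(model: torch.nn.Module) -> None:
        device = next(model.parameters()).device  # where the model lives
        if device.type == "cuda":
            with torch.cuda.device(device):       # scope to the model's GPU
                torch.cuda.empty_cache()

    release_cuda_cache(torch.nn.Linear(4, 4))     # CPU model: no CUDA call is made
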
@@ -541,8 +549,41 @@
             # speaker embedding cluster after resorted
             if self.spk_model is not None and kwargs.get("return_spk_res", True):
-                if raw_text is None:
-                    logging.error("Missing punc_model, which is required by spk_model.")
+                # 1. First check that timestamps are available
+                has_timestamp = (
+                    hasattr(self.model, "internal_punc") or
+                    self.punc_model is not None or
+                    "timestamp" in result
+                )
+                if not has_timestamp:
+                    logging.error("Need timestamp support...")
+                    return results_ret_list
+                # 2. Initialize punc_res
+                punc_res = None
+                # 3. Set punc_res according to which source is available
+                if hasattr(self.model, "internal_punc"):
+                    punc_res = [{
+                        "text": result["text"],
+                        "punc_array": result.get("punc_array", []),
+                        "timestamp": result.get("timestamp", [])
+                    }]
+                elif self.punc_model is not None:
+                    punc_res = self.inference(
+                        result["text"],
+                        model=self.punc_model,
+                        kwargs=self.punc_kwargs,
+                        **cfg
+                    )
+                else:
+                    # Timestamps only: create a basic punc_res
+                    punc_res = [{
+                        "text": result["text"],
+                        "punc_array": [],  # empty punctuation array
+                        "timestamp": result["timestamp"]
+                    }]
                 all_segments = sorted(all_segments, key=lambda x: x[0])
                 spk_embedding = result["spk_embedding"]
                 labels = self.cb_model(
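
Reading aid (not part of the patch): the three punc_res branches above, condensed into one helper. internal_punc, punc_model and the result keys mirror the diff; this is a sketch of the control flow, not FunASR API.

    def build_punc_res(self, result, cfg):
        if hasattr(self.model, "internal_punc"):
            # the ASR model already produced punctuation and timestamps itself
            return [{"text": result["text"],
                     "punc_array": result.get("punc_array", []),
                     "timestamp": result.get("timestamp", [])}]
        if self.punc_model is not None:
            # run the external punctuation model on the raw text
            return self.inference(result["text"], model=self.punc_model,
                                  kwargs=self.punc_kwargs, **cfg)
        # timestamps only: minimal punc_res with no punctuation
        return [{"text": result["text"], "punc_array": [],
                 "timestamp": result["timestamp"]}]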