游雁
2024-05-11 4a99a0ac273956a7f8e6608e71aafbb5202fcca8
sensevoice sanm
3 files changed, 44 lines modified

funasr/datasets/sense_voice_datasets/datasets.py | 24
funasr/models/sense_voice/model.py               | 12
funasr/tokenizer/sentencepiece_tokenizer.py      |  8
funasr/datasets/sense_voice_datasets/datasets.py
@@ -53,6 +53,12 @@
         self.prompt_ids_len = 0
         self.retry = kwargs.get("retry", 5)
+        self.permute = False
+        from funasr.frontends.whisper_frontend import WhisperFrontend
+
+        if isinstance(self.frontend, WhisperFrontend):
+            self.permute = True
 
     def get_source_len(self, index):
         item = self.index_ds[index]
         return self.index_ds.get_source_len(item)
@@ -92,7 +98,8 @@
             if speech_lengths > self.batch_size:
                 continue
-            speech = speech.permute(0, 2, 1)
+            if self.permute:
+                speech = speech.permute(0, 2, 1)
             target = item["target"]
             if self.preprocessor_text:
                 target = self.preprocessor_text(target)
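Note on the two hunks above: the feature transpose is now gated on the frontend type. A Whisper-style frontend presumably emits log-mel features as (batch, n_mels, frames), while the other frontends already produce (batch, frames, feat_dim), so the previously unconditional permute would corrupt non-Whisper features. A minimal sketch of the gated transpose; the shapes and the 80-mel value are illustrative assumptions, not taken from the diff:

    import torch

    # Hypothetical Whisper-style features: (batch, n_mels, frames)
    speech = torch.randn(1, 80, 120)
    permute = True  # set in __init__ when the frontend is WhisperFrontend

    if permute:
        # -> (batch, frames, n_mels), the layout the rest of the pipeline expects
        speech = speech.permute(0, 2, 1)

    assert speech.shape == (1, 120, 80)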
@@ -100,8 +107,14 @@
             task = item.get("prompt", "<|ASR|>")
             text_language = item.get("text_language", "<|zh|>")
-            prompt = f"{self.sos}{task}{text_language}"
-            prompt_ids = self.tokenizer.encode(prompt, allowed_special="all")
+            if isinstance(self.sos, str):
+                prompt = f"{self.sos}{task}{text_language}"
+                prompt_ids = self.tokenizer.encode(prompt, allowed_special="all")
+            else:
+                prompt = f"{task}{text_language}"
+                prompt_ids = self.tokenizer.encode(prompt, allowed_special="all")
+                prompt_ids = [self.sos] + prompt_ids
             prompt_ids_len = len(prompt_ids) - 1  # [sos, task]
             self.prompt_ids_len = prompt_ids_len
@@ -110,7 +123,10 @@
             if target_ids_len > 200:
                 continue
-            eos = self.tokenizer.encode(self.eos, allowed_special="all")  # [eos]
+            if isinstance(self.eos, str):
+                eos = self.tokenizer.encode(self.eos, allowed_special="all")  # [eos]
+            else:
+                eos = [self.eos]
             ids = prompt_ids + target_ids + eos  # [sos, task, lid, text, eos]
             ids_lengths = len(ids)
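Note: the sos and eos hunks handle the same ambiguity. With a tiktoken-style tokenizer, self.sos and self.eos are special-token strings that must be encoded; with a plain SentencePiece model they may already be integer ids. A hedged sketch of the shared pattern; to_ids and StubTokenizer are hypothetical, not names from the codebase:

    from typing import List, Union

    class StubTokenizer:
        # Stand-in for the real tokenizer; maps characters to dummy ids.
        def encode(self, line: str, **kwargs) -> List[int]:
            return [ord(ch) % 100 for ch in line]

    def to_ids(tok: Union[str, int], tokenizer) -> List[int]:
        # Special-token string -> encode it; raw integer id -> wrap it in a list.
        if isinstance(tok, str):
            return tokenizer.encode(tok, allowed_special="all")
        return [tok]

    tk = StubTokenizer()
    print(to_ids("<|startoftranscript|>", tk))  # string sos: encoded by the tokenizer
    print(to_ids(1, tk))                        # int sos: [1]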
funasr/models/sense_voice/model.py
@@ -1005,9 +1005,7 @@
         if specaug is not None:
             specaug_class = tables.specaug_classes.get(specaug)
             specaug = specaug_class(**specaug_conf)
-        if normalize is not None:
-            normalize_class = tables.normalize_classes.get(normalize)
-            normalize = normalize_class(**normalize_conf)
         encoder_class = tables.encoder_classes.get(encoder)
         encoder = encoder_class(input_size=input_size, **encoder_conf)
         encoder_output_size = encoder.output_size()
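For context: the surviving lines follow FunASR's registry pattern, where a component name from the config is looked up in a tables registry and instantiated from its matching *_conf dict; this hunk appears to drop that instantiation step for normalize. A sketch of the pattern with a plain dict standing in for the real registry; SpecAugStub is a placeholder class, not funasr's:

    class SpecAugStub:
        # Placeholder component; the real classes live in funasr's registries.
        def __init__(self, apply_time_mask: bool = True):
            self.apply_time_mask = apply_time_mask

    specaug_classes = {"SpecAugStub": SpecAugStub}  # stand-in for tables.specaug_classes

    specaug, specaug_conf = "SpecAugStub", {"apply_time_mask": True}
    if specaug is not None:
        specaug_class = specaug_classes.get(specaug)
        specaug = specaug_class(**specaug_conf)  # the name is rebound to the instance
    print(type(specaug).__name__)  # SpecAugStub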
@@ -1026,7 +1024,7 @@
         self.ignore_id = ignore_id
         self.specaug = specaug
         self.normalize = normalize
         self.encoder = encoder
         self.decoder = decoder
@@ -1040,12 +1038,9 @@
         self.error_calculator = None
         self.share_embedding = share_embedding
         if self.share_embedding:
             self.decoder.embed = None
         self.length_normalized_loss = length_normalized_loss
         self.beam_search = None
         self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
 
     def forward(
         self,
@@ -1139,6 +1134,7 @@
         stats = {}
 
         # 1. Forward decoder
+        ys_pad[ys_pad == -1] = 0
         decoder_out = self.decoder(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
         if isinstance(decoder_out, (list, tuple)):
             decoder_out = decoder_out[0]
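The added line sanitizes the decoder input: padding positions in ys_pad carry ignore_id (-1), and a negative index makes an nn.Embedding lookup raise an out-of-range error. Remapping those positions to 0 keeps the lookup valid. The loss path is outside this hunk; the sketch below assumes the original labels are kept separately so ignore_index still masks the padding:

    import torch
    import torch.nn.functional as F

    ignore_id = -1
    ys_pad = torch.tensor([[5, 9, ignore_id, ignore_id]])
    labels = ys_pad.clone()               # assumed: loss targets keep -1 intact

    ys_pad[ys_pad == ignore_id] = 0       # the added line: valid ids for embedding
    emb = torch.nn.Embedding(10, 4)(ys_pad)   # no IndexError from -1 now

    logits = torch.randn(1, 4, 10)        # dummy decoder output (batch, len, vocab)
    loss = F.cross_entropy(logits.transpose(1, 2), labels, ignore_index=ignore_id)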
funasr/tokenizer/sentencepiece_tokenizer.py
@@ -20,6 +20,7 @@
        # "TypeError: can't pickle SwigPyObject objects",
        # when giving it as argument of "multiprocessing.Process()".
        self.sp = None
        self._build_sentence_piece_processor()
    def __repr__(self):
        return f'{self.__class__.__name__}(model="{self.bpemodel}")'
@@ -38,10 +39,13 @@
         self._build_sentence_piece_processor()
         return self.sp.DecodePieces(list(tokens))
 
-    def encode(self, line: str) -> List[int]:
+    def encode(self, line: str, **kwargs) -> List[int]:
         self._build_sentence_piece_processor()
         return self.sp.EncodeAsIds(line)
 
-    def decode(self, line: List[int]):
+    def decode(self, line: List[int], **kwargs):
         self._build_sentence_piece_processor()
         return self.sp.DecodeIds(line)
+
+    def get_vocab_size(self):
+        return self.sp.GetPieceSize()
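The **kwargs additions are for call-site compatibility: the dataset above now calls encode(..., allowed_special="all"), an argument only the tiktoken-style tokenizer understands, so SentencepiecesTokenizer must accept and ignore it rather than raise a TypeError. The new get_vocab_size accessor relies on the processor now being built eagerly in __init__. A usage sketch; the model path is illustrative and any trained SentencePiece model file would do:

    from funasr.tokenizer.sentencepiece_tokenizer import SentencepiecesTokenizer

    tokenizer = SentencepiecesTokenizer(bpemodel="path/to/model.bpe")
    ids = tokenizer.encode("<|ASR|><|zh|>some text", allowed_special="all")  # kwarg ignored
    text = tokenizer.decode(ids)
    vocab_size = tokenizer.get_vocab_size()  # accessor added in this commit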