游雁
2024-06-14 59bc02b089f7a626fe67907dcfc695eae6883f82
decoding
4个文件已修改
46 ■■■■ 已修改文件
examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml 2 ●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/datasets/openai_datasets/datasets.py 11 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/models/llm_asr/model.py 31 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/models/sense_voice/model.py 2 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
@@ -69,7 +69,7 @@
  batch_size_scale_ratio_max: 2
  num_workers: 4
  audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
  audio_encoder_downsample_rate: 2
  audio_encoder_downsample_rate: 4
  data_split_num: 512
  batch_size_sample_max: 15
  retry: 20
funasr/datasets/openai_datasets/datasets.py
@@ -64,6 +64,8 @@
        self.max_token_length = kwargs.get("max_token_length", 1024)
        self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
        self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
        self.audio_adaptor_downsample_rate = kwargs.get("audio_adaptor_downsample_rate", 2)
        self.audio_encoder_downsample_rate = kwargs.get("audio_encoder_downsample_rate", 4)
    def get_source_len(self, index):
        item = self.index_ds[index]
@@ -136,10 +138,13 @@
                                speech = speech.permute(0, 2, 1)
                            # if speech_lengths > self.batch_size:
                            #     continue
                            if self.audio_encoder_downsample_rate == 4:
                                olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
                                olens = 1 + (olens - 3 + 2 * 1) // 2
                            elif self.audio_encoder_downsample_rate == 1:
                                olens = speech_lengths[0].item()
                            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
                            olens = 1 + (olens - 3 + 2 * 1) // 2
                            sub_token_len = (olens - 1) // 2 + 1
                            sub_token_len = (olens - 1) // self.audio_adaptor_downsample_rate + 1
                            sub_token = [0] * sub_token_len
                            fbank_beg_i = [len(source_ids)]
                            source_ids += sub_token
funasr/models/llm_asr/model.py
@@ -498,9 +498,7 @@
        with torch.cuda.amp.autocast(enabled=False):
            # audio encoder
            encoder_out, encoder_out_lens = self.audio_encoder(
                speech.permute(0, 2, 1), speech_lengths
            )
            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
            # audio_adaptor
            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -565,6 +563,12 @@
            batch_size = int((labels_ids > 0 + 1).sum())
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
    def encode(self, speech, speech_lengths):
        # audio encoder
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
        return encoder_out, encoder_out_lens
    def data_template(self, data):
        system, user, assistant = [], [], []
@@ -721,7 +725,8 @@
            speech = speech.to(torch.float16)
        elif kwargs.get("bf16", False):
            speech = speech.to(torch.bfloat16)
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
        # audio encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        # audio_adaptor
        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -806,3 +811,21 @@
            ibest_writer["text_tn"][key[0]] = response_clean
        return results, meta_data
@tables.register("model_classes", "LLMASR3")
class LLMASR3(nn.Module):
    """LLM-ASR model variant registered as ``LLMASR3``.

    Differs from the sibling model's ``encode`` only in that the audio
    features are fed to the encoder as-is, without the ``permute(0, 2, 1)``
    axis swap. NOTE(review): the class body visible here is minimal —
    presumably the remaining behavior comes from elsewhere; verify the
    intended base class before extending.
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        # Delegate construction entirely to the parent class, unchanged.
        super().__init__(*args, **kwargs)

    def encode(self, speech, speech_lengths):
        """Encode ``speech`` directly — no axis permutation is applied.

        Returns the (outputs, output lengths) pair from ``self.audio_encoder``.
        """
        enc_out, enc_out_lens = self.audio_encoder(speech, speech_lengths)
        return enc_out, enc_out_lens
funasr/models/sense_voice/model.py
@@ -1042,6 +1042,7 @@
        self.length_normalized_loss = length_normalized_loss
        self.beam_search = None
        self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
        self.encoder_output_size = encoder_output_size
    def forward(
        self,
@@ -1451,6 +1452,7 @@
        self.ctc = ctc
        self.length_normalized_loss = length_normalized_loss
        self.encoder_output_size = encoder_output_size
    def forward(
        self,