游雁
2024-06-09 3eee773814c392e497557bbad501e0add4c8eca9
funasr/datasets/openai_datasets/datasets.py
@@ -154,13 +154,16 @@
                 fbank_mask += fbank_mask_i
                 fbank_beg.append(fbank_beg_i)
             # if len(input_ids) > self.max_token_length:
             #     badcase_flag = True
+            if len(input_ids) > self.max_token_length:
+                logging.info(
+                    f"input_ids > max_token_length: {len(input_ids)}>{self.max_token_length}, {item}"
+                )
+                badcase_flag = True
             if badcase_flag:
                 continue
-            input_ids = torch.tensor(input_ids, dtype=torch.int64)[: self.max_token_length]
-            attention_mask = torch.tensor([len(input_ids)], dtype=torch.int32)
-            labels = torch.tensor(labels, dtype=torch.int64)[: self.max_token_length]
+            input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+            attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+            labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
             fbank = speech[0, :, :]
             fbank_lens = speech_lengths
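
The first hunk makes two behavioural changes: a sample whose token count exceeds max_token_length is now logged and skipped as a bad case instead of being silently truncated, and attention_mask becomes a per-token vector of ones rather than a single scalar holding the sequence length. A minimal standalone sketch of the new semantics, assuming a toy max_token_length and toy token ids; the helper name keep_sample is hypothetical, not FunASR API:

    import torch

    MAX_TOKEN_LENGTH = 2048  # stand-in for self.max_token_length

    def keep_sample(ids):
        # New behaviour: over-long samples are dropped (badcase), not truncated.
        return len(ids) <= MAX_TOKEN_LENGTH

    ids = list(range(10))  # toy token ids
    if keep_sample(ids):
        input_ids = torch.tensor(ids, dtype=torch.int64)
        # Old: a scalar tensor holding the length, e.g. tensor([10]).
        # New: one mask entry per token, a conventional padding mask.
        attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
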
@@ -213,7 +216,7 @@
                     if b < 2:
                         beg = 0
                     logging.info(
-                        f"Warning, b * t: {b * t} > {self.batch_size}, b: {b}, t: {t}, drop half data {idx}th, beg:{beg}"
+                        f"Warning, b * t: {b * t} > {self.batch_size_scale_ratio_max} * {self.batch_size}, b: {b}, t: {t}, drop half data {idx}th, beg:{beg}"
                     )
                     samples = samples[beg : beg + b : 2]
                     continue
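
The second hunk only corrects the warning text: the guard actually compares b * t against batch_size_scale_ratio_max * batch_size, so the message now prints the scaled threshold rather than the bare batch_size. A minimal sketch, not the FunASR code, of the drop-half strategy the surrounding loop implements; how beg is computed is not shown in the hunk, so 0 is assumed:

    batch_size = 6000                 # stand-in for self.batch_size
    batch_size_scale_ratio_max = 1.5  # stand-in for self.batch_size_scale_ratio_max
    samples = list(range(12))         # stand-in for the list of sample dicts
    t = 900                           # longest padded sequence length in the batch

    b = len(samples)
    while b * t > batch_size_scale_ratio_max * batch_size and b >= 2:
        beg = 0                       # assumption: the hunk does not show how beg is set
        samples = samples[beg : beg + b : 2]  # keep every other sample, halving b
        b = len(samples)
    # 12 * 900 = 10800 > 1.5 * 6000 = 9000, so one pass thins the batch to 6 samples.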