游雁
2024-06-09 1163110135c625a8a3ebd94e050d9adb5b55bb84
fix bug
1 file changed
funasr/datasets/openai_datasets/datasets.py | 8 ■■■■
funasr/datasets/openai_datasets/datasets.py
@@ -51,7 +51,7 @@
         self.batch_size = kwargs.get("batch_size")
         self.batch_type = kwargs.get("batch_type")
         self.prompt_ids_len = 0
-        self.retry = kwargs.get("retry", 10)
+        self.retry = kwargs.get("retry", 100)
         self.permute = False
         from funasr.frontends.whisper_frontend import WhisperFrontend
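
The first hunk raises the default retry budget from 10 to 100, making the loader far more tolerant of repeated failures before giving up. As a rough illustration of what such a kwargs-configured budget typically governs, here is a hypothetical sketch; fetch_with_retry and load_sample are illustrative names, not functions from this file:

def fetch_with_retry(load_sample, idx, retry=100):
    # Hypothetical helper: reattempt a failing sample read up to `retry`
    # times, mirroring how a budget like self.retry is usually consumed.
    last_err = None
    for _ in range(retry):
        try:
            return load_sample(idx)
        except Exception as e:
            last_err = e  # transient read error; try the same index again
    raise RuntimeError(f"sample {idx} still failing after {retry} attempts") from last_err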
@@ -212,13 +212,9 @@
             if self.batch_type != "example":
                 b, t = outputs["input_ids"].shape
                 if b > 1 and b * t > self.batch_size * self.batch_size_scale_ratio_max:
-                    # beg = torch.randint(0, 2, ()).item()
-                    # if b < 2:
-                    #     beg = 0
                     logging.info(
-                        f"Warning, b*t: {b}*{t}={b * t} > batch_size*relax: {self.batch_size_scale_ratio_max}*{self.batch_size}={self.batch_size_scale_ratio_max*self.batch_size}, drop half data {idx}th, beg:{beg}"
+                        f"Warning, b*t: {b}*{t}={b * t} > batch_size*relax: {self.batch_size_scale_ratio_max}*{self.batch_size}={self.batch_size_scale_ratio_max*self.batch_size}, drop last data"
                     )
-                    # samples = samples[beg : beg + b : 2]
+                    samples = samples[:-1]
                     continue
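
The second hunk removes the disabled "drop half the batch from a random offset" strategy and replaces it with a simpler one: when the padded batch exceeds batch_size * batch_size_scale_ratio_max tokens, drop only the last sample and let the enclosing loop's continue re-check the budget. Note that the old log message still interpolated beg and idx even though the beg assignments were commented out, so hitting this branch appears to have raised a NameError, presumably the bug being fixed. A standalone sketch contrasting the two behaviors (not the repository's code; the loop and the constants are assumed for illustration):

import torch

samples = list(range(12))    # stand-ins for utterances in the batch
batch_size, ratio_max = 2000, 1.5
t = 300                      # padded sequence length

while True:
    b = len(samples)
    if b > 1 and b * t > batch_size * ratio_max:
        # Old (removed) behavior: keep every other sample from a random
        # start offset, discarding roughly half the batch in one step.
        beg = torch.randint(0, 2, ()).item()
        old_style = samples[beg : beg + b : 2]

        # New behavior: trim one trailing sample per pass; the `continue`
        # in the dataset code re-evaluates b * t until the batch fits.
        samples = samples[:-1]
        continue
    break

With these numbers the new strategy trims 12 -> 11 -> 10 samples and stops as soon as b * t no longer exceeds the relaxed budget, whereas the old one could overshoot by halving the batch at once.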