self.batch_size = kwargs.get("batch_size")
self.batch_type = kwargs.get("batch_type")
self.prompt_ids_len = 0
self.retry = kwargs.get("retry", 100)

self.permute = False
# imported locally so the Whisper frontend is only loaded when this code runs
from funasr.frontends.whisper_frontend import WhisperFrontend

if self.batch_type != "example":
    b, t = outputs["input_ids"].shape
    if b > 1 and b * t > self.batch_size * self.batch_size_scale_ratio_max:
        logging.info(
            f"Warning, b*t: {b}*{t}={b * t} > batch_size*relax: "
            f"{self.batch_size_scale_ratio_max}*{self.batch_size}="
            f"{self.batch_size_scale_ratio_max * self.batch_size}, drop last data"
        )
        # the padded batch exceeds the relaxed token budget: drop the last
        # sample and rebuild the batch on the next pass
        samples = samples[:-1]
        continue
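
# Illustrative sketch (not part of the original file): the check above caps a
# dynamic batch by total padded tokens, b * t, against a relaxed budget of
# batch_size * batch_size_scale_ratio_max. A standalone version of that test,
# where the function name and the example numbers are assumptions:
def _fits_token_budget(lengths, batch_size, scale_ratio_max):
    """Return True if a batch of these sequence lengths fits the relaxed budget."""
    b = len(lengths)  # number of samples in the batch
    t = max(lengths)  # padding makes every sample as long as the longest
    return b * t <= batch_size * scale_ratio_max

# e.g. _fits_token_budget([80, 120, 400], batch_size=600, scale_ratio_max=1.5)
# is False (3 * 400 = 1200 > 900), so the batch would be trimmed and retried.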