
        self.drop_last = drop_last

        self.total_size = len(self.dataset)
        self.num_samples = int(math.ceil(self.total_size / self.num_replicas))
        self.epoch = 0
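        # Buffer size for length-sorting, scaled to cover sort_size samples per replica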
        self.sort_size = sort_size * num_replicas
        self.max_token_length = kwargs.get("max_token_length", 2048)
        self.min_token_length = kwargs.get("min_token_length", 0)
        self.length_scale_source = kwargs.get("length_scale_source", 1.0)

    def __iter__(self):
        if self.shuffle:
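            # Seed both RNGs with the current epoch so every rank draws the
            # same shuffled order for a given epoch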
            g = torch.Generator()
            g.manual_seed(self.epoch)
            random.seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # Sort indices by sample length
        sorted_indices = sorted(indices, key=lambda idx: self.dataset.get_source_len(idx))
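
        # Walk the length-sorted indices, dropping samples outside the configured
        # token-length range before they are grouped into batches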
        for idx in sorted_indices:
            original_sample_length = self.dataset.get_source_len(idx)
            # Skip samples outside the allowed token-length range
            if original_sample_length < self.min_token_length or original_sample_length > self.max_token_length:
                continue
            # Set sample_length based on the batch type: "example" counts each
            # sample as 1, otherwise length is measured in source tokens
            sample_length = 1 if self.batch_type == "example" else original_sample_length