        batch_sampler = CustomDistributedBatchSampler(dataset, **kwargs)

    else:
        # Use the buffer-sorted dynamic sampler when a positive sort_size is given,
        # otherwise fall back to the plain dynamic batch sampler.
        if kwargs.get("sort_size", -1) > 0:
            batch_sampler = CustomDistributedBufferDynamicBatchSampler(dataset, **kwargs)
        else:
            batch_sampler = CustomDistributedDynamicBatchSampler(dataset, **kwargs)

    dataloader_args["batch_sampler"] = batch_sampler
    dataloader_args["num_workers"] = kwargs.get("num_workers", 4)
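    # Illustrative sketch, not part of the original snippet: the dataloader_args assembled
    # above would typically be unpacked into a standard torch.utils.data.DataLoader.
    # `collate_fn` is an assumed name here; the real collate function is not shown.
    from torch.utils.data import DataLoader

    data_loader = DataLoader(dataset, collate_fn=collate_fn, **dataloader_args)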

        self.total_size = len(self.dataset)
        # self.num_samples = int(math.ceil(self.total_size / self.num_replicas))
        self.epoch = 0
        self.max_token_length = kwargs.get("max_token_length", 2048)
        self.length_scale_source = kwargs.get("length_scale_source", 1.0)

    def __iter__(self):
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(self.total_size, generator=g).tolist()
        else:
            indices = list(range(self.total_size))

        # Distribute indices among replicas
        indices = indices[self.rank:self.total_size:self.num_replicas]

        batches = []
        batch = []
        max_len_in_batch = 0
        for idx in indices:
            sample_length = self.dataset.get_source_len(idx)
            if sample_length > self.max_token_length:
                continue
            potential_batch_length = max(max_len_in_batch, sample_length) * (len(batch) + 1)
            if potential_batch_length <= self.batch_size:
                batch.append(idx)
                max_len_in_batch = max(max_len_in_batch, sample_length)
            else:
                batches.append(batch)
                batch = [idx]
                max_len_in_batch = sample_length

        # Add the last batch if it's not empty and we're not dropping it
        if batch and (not self.drop_last or len(batch) * max_len_in_batch == self.batch_size):
            batches.append(batch)

        return iter(batches)

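# Illustrative sketch, not part of the original module: a standalone toy version of the
# greedy token-budget packing rule the samplers above and below implement. With
# batch_type == "token", a sample joins the current batch only while
# max_len_in_batch * (len(batch) + 1) stays within the batch_size token budget.
def _demo_token_packing(lengths, token_budget):
    """Pack sample indices greedily so each padded batch stays within token_budget."""
    batches, batch, max_len = [], [], 0
    for i, n in enumerate(lengths):
        if max(max_len, n) * (len(batch) + 1) <= token_budget:
            # The sample fits: the batch, padded to its longest sample, stays in budget.
            batch.append(i)
            max_len = max(max_len, n)
        else:
            # Budget exceeded: close the current batch and start a new one.
            if batch:
                batches.append(batch)
            batch, max_len = [i], n
    if batch:
        batches.append(batch)
    return batches


# Example: _demo_token_packing([90, 100, 20, 30, 250], 300) -> [[0, 1, 2], [3], [4]].
# Sorting similar lengths together first (what the sort_size buffers below do) reduces
# the padding wasted inside each batch.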
class CustomDistributedBufferDynamicBatchSampler(DistributedSampler):
    def __init__(self, dataset,
                 batch_size,
                 batch_type="token",
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 # The remaining parameters are inferred from how the attributes are used
                 # below; their names and defaults are assumptions, not the original signature.
                 drop_last=False,
                 is_training=True,
                 sort_size=1024,
                 **kwargs):
        if num_replicas is None or rank is None:
            # Assumed fallback: resolve world size and rank from torch.distributed when it
            # is initialized, otherwise run as a single replica.
            if torch.distributed.is_available() and torch.distributed.is_initialized():
                num_replicas = torch.distributed.get_world_size()
                rank = torch.distributed.get_rank()
            else:
                num_replicas, rank = 1, 0
        self.rank = rank
        self.num_replicas = num_replicas
        self.dataset = dataset
        self.batch_size = batch_size
        self.batch_type = batch_type
        self.is_training = is_training
        self.shuffle = shuffle and is_training
        self.drop_last = drop_last

        self.total_size = len(self.dataset)
        # self.num_samples = int(math.ceil(self.total_size / self.num_replicas))
        self.epoch = 0
        # Each sorting buffer spans sort_size samples per replica.
        self.sort_size = sort_size * num_replicas
        self.max_token_length = kwargs.get("max_token_length", 2048)
        self.length_scale_source = kwargs.get("length_scale_source", 1.0)

    def __iter__(self):
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # Create sorted buffers and form batches
        buffer_batches = []
        for i in range(0, len(indices), self.sort_size):
            buffer = sorted(indices[i:i + self.sort_size], key=lambda idx: self.dataset.get_source_len(idx))
            batch = []
            max_len_in_batch = 0
            for idx in buffer:
                original_sample_length = self.dataset.get_source_len(idx)
                if original_sample_length > self.max_token_length:
                    continue
                sample_length = 1 if self.batch_type == "example" else original_sample_length
                potential_batch_length = max(max_len_in_batch, sample_length) * (len(batch) + 1)
                if potential_batch_length <= self.batch_size:
                    batch.append(idx)
                    max_len_in_batch = max(max_len_in_batch, sample_length)
                else:
                    buffer_batches.append(batch)
                    batch = [idx]
                    max_len_in_batch = sample_length

            # Add the last batch of this buffer if it's not empty
            if batch:
                buffer_batches.append(batch)

        # Ensure each rank gets the same number of batches, duplicate data if needed
        batches_per_rank = math.ceil(len(buffer_batches) / self.num_replicas)
        total_batches_needed = batches_per_rank * self.num_replicas
        buffer_batches.extend(buffer_batches[:total_batches_needed - len(buffer_batches)])

        # Evenly distribute batches from buffer_batches to each rank
        rank_batches = [[] for _ in range(self.num_replicas)]
        for i, batch in enumerate(buffer_batches):
            rank_batches[i % self.num_replicas].append(batch)

        # Assign all batches for the current rank directly
        final_batches = rank_batches[self.rank]

        return iter(final_batches)

    def __len__(self):