From 3ac03e448b7673604eb86f619b27521fca55f34d Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 21 Mar 2024 01:36:39 +0800
Subject: [PATCH] train & finetune llm-asr (#1519)
---
funasr/datasets/llm_datasets_vicuna/samplers.py | 197 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 197 insertions(+), 0 deletions(-)
diff --git a/funasr/datasets/llm_datasets_vicuna/samplers.py b/funasr/datasets/llm_datasets_vicuna/samplers.py
index c728d9c..61f7d94 100644
--- a/funasr/datasets/llm_datasets_vicuna/samplers.py
+++ b/funasr/datasets/llm_datasets_vicuna/samplers.py
@@ -232,3 +232,200 @@
def set_epoch(self, epoch):
self.epoch = epoch
+
+
+@tables.register("batch_sampler_classes", "CustomDistributedBufferBatchSampler_fn")
+def CustomDistributedBufferBatchSampler_fn(dataset, **kwargs):
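+    # Thin factory: build the sampler and return the keyword arguments
+    # (batch_sampler, num_workers, pin_memory) that the caller passes on to the DataLoader.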
+ dataloader_args = {}
+ dataloader_args["batch_sampler"] = CustomDistributedBufferBatchSampler(dataset, **kwargs)
+ dataloader_args["num_workers"] = kwargs.get("num_workers", 4)
+ dataloader_args["pin_memory"] = kwargs.get("pin_memory", True)
+
+ return dataloader_args
+
+
+@tables.register("batch_sampler_classes", "CustomDistributedBufferBatchSampler")
+class CustomDistributedBufferBatchSampler(Sampler):
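+    """Distributed batch sampler with buffered length sorting.
+
+    Indices are shuffled per epoch, padded so every rank receives the same
+    number of samples, optionally filtered by ``max_token_length``, then sorted
+    by source length inside buffers of ``sort_size`` items before being cut
+    into fixed-size batches, so samples of similar length share a batch and
+    padding is reduced.
+    """
+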
+ def __init__(self, dataset,
+ batch_size,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ drop_last=False,
+ is_training: bool = True,
+ sort_size: int = 1024,
+ **kwargs,
+ ):
+
+        try:
+            # When torch.distributed is initialized, rank and world size are queried
+            # from the process group (the rank/num_replicas arguments are overridden).
+            rank = dist.get_rank()
+            num_replicas = dist.get_world_size()
+        except Exception:
+            # torch.distributed is not initialized: fall back to a single process.
+            rank = 0
+            num_replicas = 1
+ self.rank = rank
+ self.num_replicas = num_replicas
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self.is_training = is_training
+ self.shuffle = shuffle and is_training
+ self.drop_last = drop_last
+        # Pad (or, with drop_last, trim) the dataset length so it divides evenly
+        # into batch_size * num_replicas.
+        if self.drop_last:
+            self.total_size = (len(self.dataset) // (batch_size * num_replicas)) * (batch_size * num_replicas)
+        else:
+            self.total_size = math.ceil(len(self.dataset) / (batch_size * num_replicas)) * (batch_size * num_replicas)
+ self.num_samples = int(self.total_size // self.num_replicas)
+ self.epoch = 0
+ self.max_token_length = kwargs.get("max_token_length", None)
+ self.length_scale_source = kwargs.get("length_scale_source", 1.0)
+ self.sort_size = sort_size
+
+ def __iter__(self):
+ # Generate a list of indices
+ if self.shuffle:
+ g = torch.Generator()
+ g.manual_seed(self.epoch)
+ indices = torch.randperm(len(self.dataset), generator=g).tolist()
+ else:
+ indices = list(range(len(self.dataset)))
+
+ # Add extra samples to make it evenly divisible
+ padding_size = self.total_size - len(indices)
+ if padding_size <= len(indices):
+ indices += indices[:padding_size]
+ else:
+ indices += (indices * (padding_size // len(indices)) + indices[:padding_size % len(indices)])
+
+ assert len(indices) == self.total_size
+
+ # Subsample
+ indices = indices[self.rank:self.total_size:self.num_replicas]
+ assert len(indices) == self.num_samples
+
+ # Filter out indices with length greater than the max length, if provided
+ if self.max_token_length is not None:
+ filtered_indices = []
+ for idx in indices:
+ source_len = self.dataset.get_source_len(idx) / self.length_scale_source
+ if source_len <= self.max_token_length:
+ filtered_indices.append(idx)
+ indices = filtered_indices
+
+ # Buffer sorting logic
+ sorted_batches = []
+ buffer = []
+
+ for idx in indices:
+ buffer.append(idx)
+ if len(buffer) >= self.sort_size:
+ # Sort the buffer based on some criteria, e.g., dataset sample length
+ buffer.sort(key=lambda x: self.dataset.get_source_len(x))
+ sorted_batches.extend(self._create_batches_from_buffer(buffer))
+ buffer = []
+
+ # Handle the remaining items in the buffer
+ if buffer:
+ buffer.sort(key=lambda x: self.dataset.get_source_len(x))
+ sorted_batches.extend(self._create_batches_from_buffer(buffer))
+
+ return iter(sorted_batches)
+
+ def _create_batches_from_buffer(self, buffer):
+ # Function to convert the sorted buffer into batches
+ batched_buffer = [buffer[i:i + self.batch_size] for i in range(0, len(buffer), self.batch_size)]
+ if self.drop_last and len(batched_buffer[-1]) != self.batch_size:
+ batched_buffer = batched_buffer[:-1]
+ return batched_buffer
+
+    def __len__(self):
+        # total_size is padded to a multiple of batch_size * num_replicas, so this
+        # division is exact; with max_token_length filtering it is an upper bound.
+        return self.num_samples // self.batch_size
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
+
+
+@tables.register("batch_sampler_classes", "CustomDistributedDynamicBatchSampler_fn")
+def CustomDistributedDynamicBatchSampler_fn(dataset, **kwargs):
+ dataloader_args = {}
+ dataloader_args["batch_sampler"] = CustomDistributedDynamicBatchSampler(dataset, **kwargs)
+ dataloader_args["num_workers"] = kwargs.get("num_workers", 4)
+ dataloader_args["pin_memory"] = kwargs.get("pin_memory", True)
+
+ return dataloader_args
+
+
+@tables.register("batch_sampler_classes", "CustomDistributedDynamicBatchSampler")
+class CustomDistributedDynamicBatchSampler(Sampler):
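+    """Distributed batch sampler with dynamic (length-aware) batch sizes.
+
+    ``batch_size`` is treated as a per-batch budget in source-length units: a
+    batch is closed as soon as (longest sample in the batch) * (number of
+    samples) would exceed the budget, so batches hold many short samples or a
+    few long ones.
+    """
+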
+ def __init__(self, dataset,
+ batch_size,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ drop_last=False,
+ is_training: bool = True,
+ **kwargs,
+ ):
+
+        try:
+            rank = dist.get_rank()
+            num_replicas = dist.get_world_size()
+        except Exception:
+            # torch.distributed is not initialized: fall back to a single process.
+            rank = 0
+            num_replicas = 1
+ self.rank = rank
+ self.num_replicas = num_replicas
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self.is_training = is_training
+ self.shuffle = shuffle and is_training
+ self.drop_last = drop_last
+
+ self.total_size = len(self.dataset)
+ # self.num_samples = int(math.ceil(self.total_size / self.num_replicas))
+ self.epoch = 0
+
+ def __iter__(self):
+ if self.shuffle:
+ g = torch.Generator()
+ g.manual_seed(self.epoch)
+ indices = torch.randperm(len(self.dataset), generator=g).tolist()
+ else:
+ indices = list(range(len(self.dataset)))
+
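+        # Unlike the buffer sampler above, no padding is applied here, so ranks
+        # can end up with slightly different numbers of samples (and batches).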
+ indices = indices[self.rank:self.total_size:self.num_replicas]
+
+ batches = []
+ batch = []
+        max_len_in_batch = 0
+
+        for idx in indices:
+            sample_length = self.dataset.get_source_len(idx)
+            # Cost of the batch if this sample were added: the padded length of a
+            # batch is driven by its longest sample.
+            potential_batch_length = max(max_len_in_batch, sample_length) * (len(batch) + 1)
+
+            if potential_batch_length <= self.batch_size:
+                batch.append(idx)
+                max_len_in_batch = max(max_len_in_batch, sample_length)
+            else:
+                # Close the current batch (if any) and start a new one with this sample.
+                if batch:
+                    batches.append(batch)
+                batch = [idx]
+                max_len_in_batch = sample_length
+
+ # Add the last batch if it's not empty and we're not dropping it
+ if batch and (not self.drop_last or len(batch) * max_len_in_batch == self.batch_size):
+ batches.append(batch)
+
+ return iter(batches)
+
+    def __len__(self):
+        # The number of batches depends on the sample lengths and is only known
+        # after iterating; -1 signals that no meaningful length is available.
+        return -1
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
--
Gitblit v1.9.1