From 0e622e694e6cb4459955f1e5942a7c53349ce640 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 19 Dec 2023 21:58:14 +0800
Subject: [PATCH] funasr2
---
funasr/datasets/audio_datasets/samplers.py | 39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/funasr/datasets/fun_datasets/data_sampler.py b/funasr/datasets/audio_datasets/samplers.py
similarity index 61%
rename from funasr/datasets/fun_datasets/data_sampler.py
rename to funasr/datasets/audio_datasets/samplers.py
index 3a19a17..7d3a941 100644
--- a/funasr/datasets/fun_datasets/data_sampler.py
+++ b/funasr/datasets/audio_datasets/samplers.py
@@ -2,31 +2,38 @@
import numpy as np
+from funasr.utils.register import register_class
+
+@register_class("batch_sampler_classes", "DynamicBatchLocalShuffleSampler")
class BatchSampler(torch.utils.data.BatchSampler):
- def __init__(self, dataset, batch_type: str="example", batch_size: int=100, sort_size: int=30, drop_last: bool=False, shuffle: bool=True, **kwargs):
+ def __init__(self, dataset,
+ batch_type: str="example",
+ batch_size: int=100,
+ buffer_size: int=30,
+ drop_last: bool=False,
+ shuffle: bool=True,
+ **kwargs):
self.drop_last = drop_last
self.pre_idx = -1
self.dataset = dataset
self.total_samples = len(dataset)
- # self.batch_type = args.batch_type
- # self.batch_size = args.batch_size
- # self.sort_size = args.sort_size
- # self.max_length_token = args.max_length_token
self.batch_type = batch_type
self.batch_size = batch_size
- self.sort_size = sort_size
- self.max_length_token = kwargs.get("max_length_token", 5000)
+ self.buffer_size = buffer_size
+ self.max_token_length = kwargs.get("max_token_length", 5000)
self.shuffle_idx = np.arange(self.total_samples)
self.shuffle = shuffle
def __len__(self):
return self.total_samples
-
+
+ def set_epoch(self, epoch):
+ np.random.seed(epoch)
+
def __iter__(self):
- # print("in sampler")
if self.shuffle:
np.random.shuffle(self.shuffle_idx)
@@ -35,31 +42,31 @@
max_token = 0
num_sample = 0
- iter_num = (self.total_samples-1) // self.sort_size + 1
+ iter_num = (self.total_samples-1) // self.buffer_size + 1
# print("iter_num: ", iter_num)
for iter in range(self.pre_idx + 1, iter_num):
datalen_with_index = []
- for i in range(self.sort_size):
- idx = iter * self.sort_size + i
+ for i in range(self.buffer_size):
+ idx = iter * self.buffer_size + i
if idx >= self.total_samples:
continue
idx_map = self.shuffle_idx[idx]
# prompt = self.dataset.indexed_dataset[idx_map]["prompt"]
- sample_len_cur = self.dataset.indexed_dataset.get_source_len(self.dataset.indexed_dataset[idx_map]) + \
- self.dataset.indexed_dataset.get_target_len(self.dataset.indexed_dataset[idx_map])
+ sample_len_cur = self.dataset.get_source_len(idx_map) + \
+ self.dataset.get_target_len(idx_map)
datalen_with_index.append([idx, sample_len_cur])
datalen_with_index_sort = sorted(datalen_with_index, key=lambda x: x[1])
for item in datalen_with_index_sort:
idx, sample_len_cur_raw = item
- if sample_len_cur_raw > self.max_length_token:
+ if sample_len_cur_raw > self.max_token_length:
continue
max_token_cur = max(max_token, sample_len_cur_raw)
max_token_padding = 1 + num_sample
- if self.batch_type == 'token':
+ if self.batch_type == 'length':
max_token_padding *= max_token_cur
if max_token_padding <= self.batch_size:
batch.append(idx)
--
Gitblit v1.9.1
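
Reviewer note: the second hunk is truncated before the batch-flush logic, but the accounting it sets up is visible: under batch_type == "length", a candidate batch of n samples is costed as (n + 1) times the longest sample length seen so far, and a sample is admitted only while that padded cost stays within batch_size. Below is a minimal self-contained sketch of that rule for reference. ToyDataset, the function name dynamic_batches, and the flush-on-overflow path are illustrative assumptions, not the FunASR implementation.

import numpy as np

# Hypothetical stand-in for the dataset API the patched sampler assumes
# (per-index get_source_len / get_target_len); purely illustrative.
class ToyDataset:
    def __init__(self, lengths):
        self.lengths = list(lengths)

    def __len__(self):
        return len(self.lengths)

    def get_source_len(self, idx):
        return self.lengths[idx]

    def get_target_len(self, idx):
        return 0


def dynamic_batches(dataset, batch_size=2000, buffer_size=30,
                    max_token_length=5000, batch_type="length"):
    # Global shuffle of sample indices, as in the sampler's __iter__.
    order = np.arange(len(dataset))
    np.random.shuffle(order)
    batch, max_token = [], 0
    for start in range(0, len(order), buffer_size):
        chunk = order[start:start + buffer_size]
        # Local sort by total length so similarly sized samples batch together.
        pairs = sorted(((int(i), dataset.get_source_len(int(i)) +
                         dataset.get_target_len(int(i))) for i in chunk),
                       key=lambda x: x[1])
        for idx, length in pairs:
            if length > max_token_length:
                continue  # drop over-long samples outright
            max_token_cur = max(max_token, length)
            cost = 1 + len(batch)
            if batch_type == "length":
                cost *= max_token_cur  # padded size of the would-be batch
            if cost <= batch_size:
                batch.append(idx)
                max_token = max_token_cur
            else:
                # Assumed flush path: the hunk ends before this point.
                if batch:
                    yield batch
                batch, max_token = [idx], length
    if batch:
        yield batch


if __name__ == "__main__":
    ds = ToyDataset(np.random.randint(50, 400, size=64))
    for b in dynamic_batches(ds):
        print(len(b), "samples, longest", max(ds.get_source_len(i) for i in b))

In the real class the batches would be consumed through torch.utils.data.DataLoader(dataset, batch_sampler=BatchSampler(dataset, batch_type="length", batch_size=2000)); the local sort keeps padding waste low while the per-buffer shuffle preserves randomness across epochs.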