From dc682db808eb5f425f0dbed4c5e7feb0a334955f Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 23 Nov 2023 11:43:05 +0800
Subject: [PATCH] update funasr.text -> funasr.tokenizer; fix export bug
---
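Note: BatchSampler now takes explicit keyword arguments (batch_size_type,
batch_size, sort_size, drop_last, shuffle, and max_length_token via
**kwargs) instead of reading them from an args namespace, and it owns its
shuffle_idx rather than using the dataset's. A minimal usage sketch,
assuming only what the sampler itself touches: len(dataset) and
dataset.indexed_dataset[i]["source_len"]/["target_len"]. DummyDataset is a
hypothetical stand-in, not part of funasr:

    import torch
    from funasr.datasets.data_sampler import BatchSampler

    class DummyDataset(torch.utils.data.Dataset):
        # Hypothetical stand-in exposing the fields the sampler reads.
        def __init__(self, lengths):
            self.indexed_dataset = [
                {"source_len": s, "target_len": t} for s, t in lengths
            ]
        def __len__(self):
            return len(self.indexed_dataset)
        def __getitem__(self, i):
            return self.indexed_dataset[i]

    dataset = DummyDataset([(10, 5), (80, 40), (20, 10), (300, 100)])
    # "token" packs samples until the padded size would exceed batch_size;
    # "example" simply counts samples per batch.
    sampler = BatchSampler(dataset, batch_size_type="token",
                           batch_size=250, sort_size=2, shuffle=False)
    for batch in sampler:
        print(batch)  # e.g. [0, 1], then [2], then [3]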
funasr/datasets/data_sampler.py | 51 +++++++++++++++++++++++++++++++++++--------------------
1 file changed, 31 insertions(+), 20 deletions(-)
diff --git a/funasr/datasets/data_sampler.py b/funasr/datasets/data_sampler.py
index 2875d8d..6b3407c 100644
--- a/funasr/datasets/data_sampler.py
+++ b/funasr/datasets/data_sampler.py
@@ -1,29 +1,36 @@
import torch
+import numpy as np
+
class BatchSampler(torch.utils.data.BatchSampler):
-    def __init__(self, dataset=None, args=None, drop_last=True, ):
+    def __init__(self, dataset, batch_size_type: str = "example", batch_size: int = 14, sort_size: int = 30, drop_last: bool = False, shuffle: bool = True, **kwargs):
self.drop_last = drop_last
self.pre_idx = -1
self.dataset = dataset
- self.batch_size_type = args.batch_size_type
- self.batch_size = args.batch_size
- self.sort_size = args.sort_size
- self.max_length_token = args.max_length_token
self.total_samples = len(dataset)
+ self.batch_size_type = batch_size_type
+ self.batch_size = batch_size
+ self.sort_size = sort_size
+ self.max_length_token = kwargs.get("max_length_token", 5000)
+ self.shuffle_idx = np.arange(self.total_samples)
+ self.shuffle = shuffle
def __len__(self):
return self.total_samples
-
def __iter__(self):
+        # Reshuffle the sample order at the start of each epoch.
+        if self.shuffle:
+            np.random.shuffle(self.shuffle_idx)
+
batch = []
max_token = 0
num_sample = 0
-
+
iter_num = (self.total_samples-1) // self.sort_size + 1
for iter in range(self.pre_idx + 1, iter_num):
datalen_with_index = []
for i in range(self.sort_size):
@@ -31,30 +38,34 @@
if idx >= self.total_samples:
continue
- if self.batch_size_type == "example":
- sample_len_cur = 1
- else:
- idx_map = self.dataset.shuffle_idx[idx]
- # prompt = self.dataset.indexed_dataset[idx_map]["prompt"]
- sample_len_cur = self.dataset.indexed_dataset[idx_map]["source_len"] + \
- self.dataset.indexed_dataset[idx_map]["target_len"]
+ idx_map = self.shuffle_idx[idx]
+ # prompt = self.dataset.indexed_dataset[idx_map]["prompt"]
+ sample_len_cur = self.dataset.indexed_dataset[idx_map]["source_len"] + \
+ self.dataset.indexed_dataset[idx_map]["target_len"]
datalen_with_index.append([idx, sample_len_cur])
datalen_with_index_sort = sorted(datalen_with_index, key=lambda x: x[1])
for item in datalen_with_index_sort:
- idx, sample_len_cur = item
- if sample_len_cur > self.max_length_token:
+ idx, sample_len_cur_raw = item
+ if sample_len_cur_raw > self.max_length_token:
continue
- max_token_cur = max(max_token, sample_len_cur)
- max_token_padding = (1 + num_sample) * max_token_cur
+
+ max_token_cur = max(max_token, sample_len_cur_raw)
+ max_token_padding = 1 + num_sample
+ if self.batch_size_type == 'token':
+ max_token_padding *= max_token_cur
if max_token_padding <= self.batch_size:
batch.append(idx)
max_token = max_token_cur
num_sample += 1
else:
yield batch
- max_token = sample_len_cur
- num_sample = 1
batch = [idx]
+ max_token = sample_len_cur_raw
+ num_sample = 1
+
+        # Yield the trailing partial batch unless drop_last is set.
+        if batch and not self.drop_last:
+            yield batch
\ No newline at end of file
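Note on the packing rule in __iter__ above: with batch_size_type "token",
a candidate sample joins the current batch only while the padded cost,
(num_sample + 1) * max(max_token, sample_len), stays within batch_size;
with "example" the cost is simply num_sample + 1. A standalone sketch of
just that rule (pack_batches is a hypothetical helper, not funasr code;
it assumes lengths are already sorted and skips the max_length_token
filter):

    def pack_batches(sorted_lens, batch_size, batch_size_type="token"):
        batches, batch, max_token = [], [], 0
        for i, length in enumerate(sorted_lens):
            max_token_cur = max(max_token, length)
            cost = len(batch) + 1            # "example": samples per batch
            if batch_size_type == "token":
                cost *= max_token_cur        # "token": padded token area
            if cost <= batch_size:
                batch.append(i)
                max_token = max_token_cur
            else:
                batches.append(batch)
                batch, max_token = [i], length
        if batch:
            batches.append(batch)
        return batches

    # Lengths 15 and 120 share a 250-token budget (2 * 120 = 240), while
    # 400 forces a batch of its own.
    print(pack_batches([15, 120, 30, 400], 250))  # [[0, 1], [2], [3]]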
--
Gitblit v1.9.1