From 3eee773814c392e497557bbad501e0add4c8eca9 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Sun, 09 Jun 2024 02:11:42 +0800
Subject: [PATCH] fix bug: skip bad samples with retry and cap input length in openai_datasets
---
funasr/datasets/openai_datasets/datasets.py | 107 +++++++++++++++++++++++++++++------------------------
 1 file changed, 59 insertions(+), 48 deletions(-)
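
Note (illustrative only, not part of the applied diff): the __getitem__ hunks below replace hard failures with a badcase/retry loop — a wav that fails to load or an input that exceeds max_token_length is logged, flagged, and a different index is drawn on the next of up to self.retry attempts. A minimal sketch of that pattern, assuming the dataset yields dicts with "wav" and "text" fields and taking hypothetical load_fn/tokenize_fn callables in place of the real FunASR helpers:

import logging
import random
import traceback

def get_item_with_retry(dataset, index, load_fn, tokenize_fn,
                        retry=10, max_token_length=1024):
    # Mirrors the retry pattern of the patched __getitem__:
    # any bad case re-samples a new index and tries again.
    for attempt in range(retry):
        index_cur = index if attempt == 0 else random.randint(0, len(dataset) - 1)
        item = dataset[index_cur]
        try:
            audio = load_fn(item["wav"])  # corrupt or missing audio raises here
        except Exception as e:
            logging.error(f"Loading wav failed! {e}, {traceback.format_exc()}")
            continue  # bad case: try a different sample
        input_ids = tokenize_fn(item["text"])
        if len(input_ids) > max_token_length:
            logging.info(f"input_ids > max_token_length: {len(input_ids)}>{max_token_length}")
            continue  # bad case: sequence too long, re-sample
        return {"audio": audio, "input_ids": input_ids}
    return None  # all retries exhausted; the collator skips None samples
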
diff --git a/funasr/datasets/openai_datasets/datasets.py b/funasr/datasets/openai_datasets/datasets.py
index 9bd0698..9c74ef1 100644
--- a/funasr/datasets/openai_datasets/datasets.py
+++ b/funasr/datasets/openai_datasets/datasets.py
@@ -51,7 +51,7 @@
self.batch_size = kwargs.get("batch_size")
self.batch_type = kwargs.get("batch_type")
self.prompt_ids_len = 0
- self.retry = kwargs.get("retry", 5)
+ self.retry = kwargs.get("retry", 10)
self.permute = False
from funasr.frontends.whisper_frontend import WhisperFrontend
@@ -60,6 +60,9 @@
self.permute = True
self.pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
+ # self.kwargs = kwargs
+ self.max_token_length = kwargs.get("max_token_length", 1024)
+ self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
def get_source_len(self, index):
item = self.index_ds[index]
@@ -77,7 +80,9 @@
# pdb.set_trace()
output = None
+
for idx in range(self.retry):
+ badcase_flag = False
if idx == 0:
index_cur = index
else:
@@ -112,9 +117,14 @@
"<|endofspeech|>", ""
)
if sub_str.startswith("!"):
-
- data_src = load_audio_text_image_video(sub_str[1:], fs=self.fs)
-
+ try:
+ data_src = load_audio_text_image_video(sub_str[1:], fs=self.fs)
+ except Exception as e:
+ logging.error(
+ f"Loading wav failed! {str(e)}, {traceback.format_exc()}"
+ )
+ badcase_flag = True
+ continue
speech, speech_lengths = extract_fbank(
data_src,
data_type=self.data_type,
@@ -134,6 +144,8 @@
source_ids += sub_token
fbank_mask_i += [1] * len(sub_token)
+ if badcase_flag:
+ continue
source_mask = [-100] * len(source_ids)
target_out = f"{target_out}<|im_end|>"
target_ids = self.tokenizer.encode(target_out)
@@ -142,9 +154,16 @@
fbank_mask += fbank_mask_i
fbank_beg.append(fbank_beg_i)
- input_ids = torch.tensor(input_ids, dtype=torch.int64)
- attention_mask = torch.tensor([len(input_ids)], dtype=torch.int32)
- labels = torch.tensor(labels, dtype=torch.int64)
+ if len(input_ids) > self.max_token_length:
+ logging.info(
+ f"input_ids > max_token_length: {len(input_ids)}>{self.max_token_length}, {item}"
+ )
+ badcase_flag = True
+ if badcase_flag:
+ continue
+ input_ids = torch.tensor(input_ids, dtype=torch.int64) # [: self.max_token_length]
+ attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+ labels = torch.tensor(labels, dtype=torch.int64) # [: self.max_token_length]
fbank = speech[0, :, :]
fbank_lens = speech_lengths
@@ -165,51 +184,43 @@
return output
def collator(self, samples: list = None):
- outputs = {}
- for sample in samples:
- if sample is None:
- continue
- for key in sample.keys():
- if key not in outputs:
- outputs[key] = []
- outputs[key].append(sample[key])
- for key, data_list in outputs.items():
- if isinstance(data_list[0], torch.Tensor):
- if data_list[0].dtype == torch.int64 or data_list[0].dtype == torch.int32:
+ for idx in range(self.retry):
+ badcase_flag = False
- pad_value = self.int_pad_value
- else:
- pad_value = self.float_pad_value
+ outputs = {}
+ for sample in samples:
+ if sample is None:
+ continue
+ for key in sample.keys():
+ if key not in outputs:
+ outputs[key] = []
+ outputs[key].append(sample[key])
- outputs[key] = torch.nn.utils.rnn.pad_sequence(
- data_list, batch_first=True, padding_value=pad_value
- )
-
- if self.batch_type != "example":
- for i in range(10):
- outputs = self._filter_badcase(outputs, i=i)
-
- return outputs
-
- def _filter_badcase(self, outputs, i=0):
- b, t, _ = outputs["speech"].shape
-
- if b * t > self.batch_size * 1.25:
- beg = torch.randint(0, 2, ()).item()
- if b < 2:
- beg = 0
- logging.info(
- f"Warning, b * t: {b * t} > {self.batch_size}, drop half data {i}th, beg:{beg}"
- )
for key, data_list in outputs.items():
- outputs[key] = outputs[key][beg : beg + b : 2]
+ if isinstance(data_list[0], torch.Tensor):
+ if data_list[0].dtype == torch.int64 or data_list[0].dtype == torch.int32:
- speech_lengths_max = outputs["speech_lengths"].max().item()
- outputs["speech"] = outputs["speech"][:, :speech_lengths_max, :]
- text_lengths_max = outputs["text_lengths"].max().item()
- outputs["text"] = outputs["text"][:, :text_lengths_max]
- target_mask_lengths_max = outputs["target_mask_lengths"].max().item()
- outputs["target_mask"] = outputs["target_mask"][:, :target_mask_lengths_max]
+ pad_value = self.int_pad_value
+ else:
+ pad_value = self.float_pad_value
+
+ outputs[key] = torch.nn.utils.rnn.pad_sequence(
+ data_list, batch_first=True, padding_value=pad_value
+ )
+
+ if self.batch_type != "example":
+ b, t = outputs["input_ids"].shape
+ if b * t > self.batch_size * self.batch_size_scale_ratio_max:
+ beg = torch.randint(0, 2, ()).item()
+ if b < 2:
+ beg = 0
+ logging.info(
+ f"Warning, b * t: {b * t} > {self.batch_size_scale_ratio_max} * {self.batch_size}, b: {b}, t: {t}, drop half data {idx}th, beg:{beg}"
+ )
+ samples = samples[beg : beg + b : 2]
+ continue
+
+ break
return outputs
--
Gitblit v1.9.1
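
Note (illustrative sketch with assumed names, not the patched collator): the reworked collator pads every tensor field with torch.nn.utils.rnn.pad_sequence and, when batch_type is not "example", compares the padded size b * t of input_ids against batch_size * batch_size_scale_ratio_max; if the budget is exceeded it keeps every other sample and re-collates, up to self.retry times. Roughly, assuming a non-empty batch whose fields are tensors:

import torch

def collate_with_budget(samples, batch_size, scale_ratio_max=1.5, retry=10,
                        int_pad=0, float_pad=0.0):
    for _ in range(retry):
        samples = [s for s in samples if s is not None]
        outputs = {}
        for key in samples[0]:
            seqs = [s[key] for s in samples]
            # integer fields (ids, masks) get the int pad, others the float pad
            pad = int_pad if seqs[0].dtype in (torch.int32, torch.int64) else float_pad
            outputs[key] = torch.nn.utils.rnn.pad_sequence(
                seqs, batch_first=True, padding_value=pad
            )
        b, t = outputs["input_ids"].shape
        if b * t > batch_size * scale_ratio_max and b >= 2:
            beg = torch.randint(0, 2, ()).item()  # randomly start at sample 0 or 1
            samples = samples[beg::2]             # drop half the batch, then re-pad
            continue
        break
    return outputs
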