From e5be2853474189425947e66d795bf6130730bc06 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Sat, 08 Jun 2024 17:49:36 +0800
Subject: [PATCH] fix attention_mask construction; make oversized-batch scale ratio configurable
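
attention_mask was built as a one-element tensor holding the sequence length
instead of a per-token mask; build it as ones over input_ids instead. Also
log and skip samples whose token count exceeds max_token_length, and replace
the hard-coded oversized-batch factor of 2 with a batch_size_scale_ratio_max
kwarg (default 1.5).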
---
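Notes (not part of the commit message): a minimal sketch, assuming only torch,
of why the attention_mask change matters; the token ids below are made up.

    import torch

    input_ids = [151643, 872, 198, 1724]  # 4 hypothetical token ids

    # Before: a one-element tensor holding the sequence length, not a mask.
    old_mask = torch.tensor([len(input_ids)], dtype=torch.int32)
    print(old_mask)  # tensor([4], dtype=torch.int32), shape (1,)

    # After: a per-token mask of ones, same length as input_ids.
    new_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
    print(new_mask)  # tensor([1, 1, 1, 1], dtype=torch.int32), shape (4,)
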
funasr/datasets/openai_datasets/datasets.py | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
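
A hedged sketch of the configurable size guard introduced below; the
batch_size semantics and the numbers are assumptions for illustration,
not values from the repo.

    kwargs = {"batch_size_scale_ratio_max": 2.0}
    batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)

    batch_size = 6000  # assumed token budget per batch
    b, t = 8, 1600     # padded batch: 8 sequences x 1600 tokens -> 12800
    if b * t > batch_size * batch_size_scale_ratio_max:  # 12800 > 12000
        print("padded batch exceeds budget; trimmed as in the last hunk below")
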
diff --git a/funasr/datasets/openai_datasets/datasets.py b/funasr/datasets/openai_datasets/datasets.py
index 8cb0926..3bc6aa0 100644
--- a/funasr/datasets/openai_datasets/datasets.py
+++ b/funasr/datasets/openai_datasets/datasets.py
@@ -62,6 +62,7 @@
         self.pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
         # self.kwargs = kwargs
         self.max_token_length = kwargs.get("max_token_length", 1024)
+        self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
 
     def get_source_len(self, index):
         item = self.index_ds[index]
@@ -154,12 +155,15 @@
                 fbank_beg.append(fbank_beg_i)
 
             if len(input_ids) > self.max_token_length:
+                logging.info(
+                    f"input_ids > max_token_length: {len(input_ids)}>{self.max_token_length}, {item}"
+                )
                 badcase_flag = True
             if badcase_flag:
                 continue
-            input_ids = torch.tensor(input_ids, dtype=torch.int64)
-            attention_mask = torch.tensor([len(input_ids)], dtype=torch.int32)
-            labels = torch.tensor(labels, dtype=torch.int64)
+            input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+            attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+            labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
 
             fbank = speech[0, :, :]
             fbank_lens = speech_lengths
@@ -207,7 +211,7 @@
 
         if self.batch_type != "example":
             b, t = outputs["input_ids"].shape
-            if b * t > self.batch_size * 2:
+            if b * t > self.batch_size * self.batch_size_scale_ratio_max:
                 beg = torch.randint(0, 2, ()).item()
                 if b < 2:
                     beg = 0
--
Gitblit v1.9.1