From 997374b88fe6b2ae5cb4dcaf47d78cb3eff09fc2 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 19:56:52 +0800
Subject: [PATCH] add ctc inference code (#1806)
---
funasr/datasets/openai_datasets/datasets.py | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/funasr/datasets/openai_datasets/datasets.py b/funasr/datasets/openai_datasets/datasets.py
index 8cb0926..8d243ac 100644
--- a/funasr/datasets/openai_datasets/datasets.py
+++ b/funasr/datasets/openai_datasets/datasets.py
@@ -51,7 +51,7 @@
self.batch_size = kwargs.get("batch_size")
self.batch_type = kwargs.get("batch_type")
self.prompt_ids_len = 0
- self.retry = kwargs.get("retry", 10)
+ self.retry = kwargs.get("retry", 100)
self.permute = False
from funasr.frontends.whisper_frontend import WhisperFrontend
@@ -62,6 +62,8 @@
self.pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
# self.kwargs = kwargs
self.max_token_length = kwargs.get("max_token_length", 1024)
+ self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
+ self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
def get_source_len(self, index):
item = self.index_ds[index]
@@ -154,12 +156,15 @@
fbank_beg.append(fbank_beg_i)
if len(input_ids) > self.max_token_length:
+ logging.info(
+ f"input_ids > max_token_length: {len(input_ids)} > {self.max_token_length}, {item}"
+ )
badcase_flag = True
if badcase_flag:
continue
- input_ids = torch.tensor(input_ids, dtype=torch.int64)
- attention_mask = torch.tensor([len(input_ids)], dtype=torch.int32)
- labels = torch.tensor(labels, dtype=torch.int64)
+ input_ids = torch.tensor(input_ids, dtype=torch.int64) # [: self.max_token_length]
+ attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+ labels = torch.tensor(labels, dtype=torch.int64) # [: self.max_token_length]
fbank = speech[0, :, :]
fbank_lens = speech_lengths
@@ -207,14 +212,11 @@
if self.batch_type != "example":
b, t = outputs["input_ids"].shape
- if b * t > self.batch_size * 2:
- beg = torch.randint(0, 2, ()).item()
- if b < 2:
- beg = 0
+ if b > 1 and b * t > self.batch_size_token_max:
logging.info(
- f"Warning, b * t: {b * t} > {self.batch_size}, b: {b}, t: {t}, drop half data {idx}th, beg:{beg}"
+ f"Warning, {idx}th, b*t: {b}*{t}={b * t} > batch_size_sample_max: {self.batch_size_token_max}, drop last data"
)
- samples = samples[beg : beg + b : 2]
+ samples = samples[:-1]
continue
break
--
Gitblit v1.9.1
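
For reference, below is a minimal standalone sketch of the token-budget batching behavior this patch introduces: pad the batch to shape [b, t], and while b * t exceeds batch_size_token_max (and more than one sample remains), drop the trailing sample and re-collate. The helper names (collate_with_token_budget, pad_batch) and the padding scheme are assumptions for illustration, not FunASR API.

import logging

import torch


def pad_batch(seqs, pad_id=0):
    # Right-pad variable-length 1-D tensors into one [b, t] tensor.
    # pad_id=0 is an assumption for this sketch.
    t = max(s.numel() for s in seqs)
    return torch.stack(
        [torch.nn.functional.pad(s, (0, t - s.numel()), value=pad_id) for s in seqs]
    )


def collate_with_token_budget(samples, batch_size_token_max=2500):
    # Mirrors the patched loop: keep dropping the trailing sample until the
    # padded batch fits the token budget, but never drop below one sample.
    while True:
        input_ids = pad_batch([s["input_ids"] for s in samples])
        b, t = input_ids.shape
        if b > 1 and b * t > batch_size_token_max:
            logging.info(
                f"b*t: {b}*{t}={b * t} > batch_size_token_max: "
                f"{batch_size_token_max}, drop last sample"
            )
            samples = samples[:-1]
            continue
        # Per-token attention mask, as in the patched collator:
        # ones over real tokens, zeros over padding.
        attention_mask = pad_batch(
            [torch.ones(s["input_ids"].numel(), dtype=torch.int32) for s in samples]
        )
        return input_ids, attention_mask

With the default budget above, a batch of 3 sequences each padded to length 1000 (b*t = 3000 > 2500) would be trimmed to 2 sequences (b*t = 2000) before being returned.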