From 1596f6f414f6f41da66506debb1dff19fffeb3ec Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 24 Jun 2024 11:55:17 +0800
Subject: [PATCH] fix hotwords bug
---
funasr/datasets/large_datasets/build_dataloader.py | 54 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 16 deletions(-)
diff --git a/funasr/datasets/large_datasets/build_dataloader.py b/funasr/datasets/large_datasets/build_dataloader.py
index 7889e70..da04717 100644
--- a/funasr/datasets/large_datasets/build_dataloader.py
+++ b/funasr/datasets/large_datasets/build_dataloader.py
@@ -6,11 +6,12 @@
 import sentencepiece as spm
 from torch.utils.data import DataLoader
-from typeguard import check_argument_types
 from funasr.datasets.large_datasets.dataset import Dataset
-from funasr.iterators.abs_iter_factory import AbsIterFactory
-from funasr.text.abs_tokenizer import AbsTokenizer
+from funasr.datasets.large_datasets.abs_iter_factory import AbsIterFactory
+from funasr.tokenizer.abs_tokenizer import AbsTokenizer
+
+from funasr.register import tables
 def read_symbol_table(symbol_table_file):
@@ -43,7 +44,6 @@
 class SentencepiecesTokenizer(AbsTokenizer):
     def __init__(self, model: Union[Path, str]):
-        assert check_argument_types()
         self.model = str(model)
         self.sp = None
@@ -64,24 +64,46 @@
         return self.sp.DecodePieces(list(tokens))
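+# register with FunASR's "dataset_classes" table so configs can select "LargeDataset" by name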
+@tables.register("dataset_classes", "LargeDataset")
 class LargeDataLoader(AbsIterFactory):
     def __init__(self, args, mode="train"):
-        symbol_table = read_symbol_table(args.token_list) if args.token_list is not None else None
-        seg_dict = load_seg_dict(args.seg_dict_file) if args.seg_dict_file is not None else None
-        punc_dict = load_seg_dict(args.punc_dict_file) if args.punc_dict_file is not None else None
-        bpe_tokenizer = load_seg_dict(args.bpemodel_file) if args.bpemodel_file is not None else None
+        symbol_table, seg_dict, punc_dict, bpe_tokenizer = None, None, None, None
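+        # each resource is optional: hasattr guards let configs omit unused fields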
+        if hasattr(args, "token_list") and args.token_list is not None:
+            symbol_table = read_symbol_table(args.token_list)
+        if hasattr(args, "seg_dict_file") and args.seg_dict_file is not None:
+            seg_dict = load_seg_dict(args.seg_dict_file)
+        if hasattr(args, "punc_list") and args.punc_list is not None:
+            punc_dict = read_symbol_table(args.punc_list)
+        if hasattr(args, "bpemodel") and args.bpemodel is not None:
+            bpe_tokenizer = SentencepiecesTokenizer(args.bpemodel)
         self.dataset_conf = args.dataset_conf
-        self.frontend_conf = args.frontend_conf
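+        # frontend_conf and speed_perturb may be absent from some configs; fall back to None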
+ if "frontend_conf" not in args:
+ self.frontend_conf = None
+ else:
+ self.frontend_conf = args.frontend_conf
+ self.speed_perturb = args.speed_perturb if hasattr(args, "speed_perturb") else None
logging.info("dataloader config: {}".format(self.dataset_conf))
batch_mode = self.dataset_conf.get("batch_mode", "padding")
- self.dataset = Dataset(args.data_list, symbol_table, seg_dict, punc_dict, bpe_tokenizer,
- self.dataset_conf, self.frontend_conf, speed_perturb=args.speed_perturb,
- mode=mode, batch_mode=batch_mode)
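+        # choose the train or validation file list based on mode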
+        data_list = args.train_data_file if mode == "train" else args.valid_data_file
+        self.dataset = Dataset(
+            data_list,
+            symbol_table,
+            seg_dict,
+            punc_dict,
+            bpe_tokenizer,
+            self.dataset_conf,
+            self.frontend_conf,
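+            # speed perturbation is a training-time augmentation; disabled outside "train" mode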
+            speed_perturb=self.speed_perturb if mode == "train" else None,
+            mode=mode,
+            batch_mode=batch_mode,
+        )
     def build_iter(self, epoch, shuffle=True):
         self.dataset.set_epoch(epoch)
-        data_loader = DataLoader(self.dataset,
-                                 batch_size=None,
-                                 pin_memory=True,
-                                 num_workers=self.dataset_conf.get("num_workers", 8))
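+        # batch_size=None disables PyTorch's automatic batching; the Dataset yields ready-made batches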
+        data_loader = DataLoader(
+            self.dataset,
+            batch_size=None,
+            pin_memory=True,
+            num_workers=self.dataset_conf.get("num_workers", 8),
+        )
         return data_loader
--
Gitblit v1.9.1