游雁
2024-03-27 9b4e9cc8a0311e5243d69b73ed073e7ea441982e
funasr/datasets/large_datasets/build_dataloader.py
@@ -8,9 +8,10 @@
 from torch.utils.data import DataLoader
 from funasr.datasets.large_datasets.dataset import Dataset
-from funasr.iterators.abs_iter_factory import AbsIterFactory
-from funasr.text.abs_tokenizer import AbsTokenizer
+from funasr.datasets.large_datasets.abs_iter_factory import AbsIterFactory
+from funasr.tokenizer.abs_tokenizer import AbsTokenizer
+from funasr.register import tables


 def read_symbol_table(symbol_table_file):
     if isinstance(symbol_table_file, str):
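The hunk above shows only the first two lines of `read_symbol_table`. For context, here is a minimal sketch of what such a reader conventionally does, assuming a one-token-per-line vocabulary file where a token's id is its line number; this is an illustration, not the repository's exact implementation:

```python
def read_symbol_table(symbol_table_file):
    # Sketch only: assumes a plain vocabulary file, one token per line,
    # mapping each token to its line number as the id.
    symbol_table = {}
    if isinstance(symbol_table_file, str):
        with open(symbol_table_file, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                token = line.strip()
                if token:
                    symbol_table[token] = idx
    else:
        # Otherwise assume an iterable of tokens was passed directly.
        for idx, token in enumerate(symbol_table_file):
            symbol_table[token] = idx
    return symbol_table
```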
@@ -61,7 +62,7 @@
         self._build_sentence_piece_processor()
         return self.sp.DecodePieces(list(tokens))


+@tables.register("dataset_classes", "LargeDataset")
 class LargeDataLoader(AbsIterFactory):
     def __init__(self, args, mode="train"):
         symbol_table, seg_dict, punc_dict, bpe_tokenizer = None, None, None, None
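The newly added `@tables.register("dataset_classes", "LargeDataset")` decorator hooks the class into a central registry so that a config can select the implementation by string name. A simplified stand-in for that registry pattern (the `Tables` class below is a hypothetical reduction for illustration, not the actual `funasr.register.tables`):

```python
from collections import defaultdict

class Tables:
    """Simplified (group, name) -> class registry, illustrating the decorator pattern."""

    def __init__(self):
        self._groups = defaultdict(dict)

    def register(self, group, name):
        # Returns a decorator that records the class under (group, name)
        # and hands the class back unchanged.
        def decorator(cls):
            self._groups[group][name] = cls
            return cls
        return decorator

    def get(self, group, name):
        return self._groups[group][name]

tables = Tables()

@tables.register("dataset_classes", "LargeDataset")
class LargeDataLoaderDemo:
    pass

# A config can now look the implementation up by string name:
assert tables.get("dataset_classes", "LargeDataset") is LargeDataLoaderDemo
```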
@@ -69,12 +70,15 @@
             symbol_table = read_symbol_table(args.token_list)
         if hasattr(args, "seg_dict_file") and args.seg_dict_file is not None:
             seg_dict = load_seg_dict(args.seg_dict_file)
-        if hasattr(args, "punc_dict_file") and args.punc_dict_file is not None:
-            punc_dict = read_symbol_table(args.punc_dict_file)
+        if hasattr(args, "punc_list") and args.punc_list is not None:
+            punc_dict = read_symbol_table(args.punc_list)
         if hasattr(args, "bpemodel") and args.bpemodel is not None:
             bpe_tokenizer = SentencepiecesTokenizer(args.bpemodel)
         self.dataset_conf = args.dataset_conf
-        self.frontend_conf = args.frontend_conf
+        if "frontend_conf" not in args:
+            self.frontend_conf = None
+        else:
+            self.frontend_conf = args.frontend_conf
         self.speed_perturb = args.speed_perturb if hasattr(args, "speed_perturb") else None
         logging.info("dataloader config: {}".format(self.dataset_conf))
         batch_mode = self.dataset_conf.get("batch_mode", "padding")
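The repeated `hasattr(args, ...)` and `"frontend_conf" not in args` guards all handle the same situation: `args` may or may not carry each optional field. Assuming `args` behaves like an `argparse.Namespace` (which supports `in` via `__contains__`), the guards can be exercised, and written more compactly with `getattr`, as below; the compact form is a suggestion, not what the commit does:

```python
from argparse import Namespace

# Hypothetical args with only dataset_conf set; frontend_conf and
# speed_perturb are deliberately absent.
args = Namespace(dataset_conf={"batch_mode": "padding"})

# Equivalent to the membership/hasattr guards in the diff above:
frontend_conf = args.frontend_conf if "frontend_conf" in args else None
speed_perturb = getattr(args, "speed_perturb", None)

assert frontend_conf is None and speed_perturb is None
```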