From 4ace5a95b052d338947fc88809a440ccd55cf6b4 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 16 Nov 2023 16:39:52 +0800
Subject: [PATCH] funasr: rework large-dataset dataloader
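
Rework the large-dataset dataloader:

* replace ArkDataLoader, which loaded its settings from a separate YAML
  config file, with LargeDataLoader, which reads them directly from the
  parsed training args
* add load_seg_dict() plus optional segmentation/punctuation
  dictionaries and a lazily initialized SentencePiece tokenizer
  (SentencepiecesTokenizer)
* forward frontend_conf, train-only speed perturbation, and the
  configured batch_mode (default "padding") to Dataset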
---
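Notes:

A minimal usage sketch for the new classes, assuming a trained
SentencePiece model at "bpe.model", a token list at "tokens.txt", and
data lists "train.list"/"valid.list" (all placeholder paths, not part
of this patch; what Dataset does with the lists is also outside this
diff):

    from argparse import Namespace

    from funasr.datasets.large_datasets.build_dataloader import (
        LargeDataLoader,
        SentencepiecesTokenizer,
    )

    # The model file is only read on the first call, not at construction.
    tokenizer = SentencepiecesTokenizer("bpe.model")
    pieces = tokenizer.text2tokens("hello world")  # e.g. ["▁hello", "▁world"], model-dependent
    text = tokenizer.tokens2text(pieces)

    # Optional fields (seg_dict_file, punc_list, frontend_conf, ...) may
    # simply be left off the namespace; __init__ falls back to None.
    args = Namespace(
        token_list="tokens.txt",
        bpemodel="bpe.model",
        dataset_conf={"batch_mode": "padding"},
        speed_perturb=None,
        train_data_file="train.list",
        valid_data_file="valid.list",
    )
    loader = LargeDataLoader(args, mode="train")
    batches = loader.build_iter(epoch=0)  # batch iterator for epoch 0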
 funasr/datasets/large_datasets/build_dataloader.py | 84 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 74 insertions(+), 10 deletions(-)
diff --git a/funasr/datasets/large_datasets/build_dataloader.py b/funasr/datasets/large_datasets/build_dataloader.py
index 37fbb7c..6c2da2a 100644
--- a/funasr/datasets/large_datasets/build_dataloader.py
+++ b/funasr/datasets/large_datasets/build_dataloader.py
@@ -1,10 +1,15 @@
 import logging
+from pathlib import Path
+from typing import Iterable
+from typing import List
+from typing import Union
 
-import yaml
-
+import sentencepiece as spm
 from torch.utils.data import DataLoader
+
 from funasr.datasets.large_datasets.dataset import Dataset
 from funasr.iterators.abs_iter_factory import AbsIterFactory
+from funasr.text.abs_tokenizer import AbsTokenizer
 
 
 def read_symbol_table(symbol_table_file):
@@ -22,15 +27,74 @@
     return symbol_table
 
 
-class ArkDataLoader(AbsIterFactory):
-    def __init__(self, data_list, dict_file, config_file, mode="train"):
-        symbol_table = read_symbol_table(dict_file)
-        with open(config_file, "r") as fin:
-            configs = yaml.load(fin, Loader=yaml.FullLoader)
-        self.dataset_conf = configs["dataset_conf"]
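+# A seg_dict file has one entry per line, "<token> <piece> <piece> ...",
+# e.g. the (illustrative) line "nineteen ni net een"; load_seg_dict maps
+# each token to its space-joined pieces: {"nineteen": "ni net een"}.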
+def load_seg_dict(seg_dict_file):
+    seg_dict = {}
+    assert isinstance(seg_dict_file, str)
+    with open(seg_dict_file, "r", encoding="utf8") as f:
+        lines = f.readlines()
+    for line in lines:
+        s = line.strip().split()
+        key = s[0]
+        value = s[1:]
+        seg_dict[key] = " ".join(value)
+    return seg_dict
+
+
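+# Thin SentencePiece wrapper; the model file is read lazily on the first
+# text2tokens/tokens2text call rather than in __init__.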
+class SentencepiecesTokenizer(AbsTokenizer):
+    def __init__(self, model: Union[Path, str]):
+        self.model = str(model)
+        self.sp = None
+
+    def __repr__(self):
+        return f'{self.__class__.__name__}(model="{self.model}")'
+
+    def _build_sentence_piece_processor(self):
+        if self.sp is None:
+            self.sp = spm.SentencePieceProcessor()
+            self.sp.load(self.model)
+
+    def text2tokens(self, line: str) -> List[str]:
+        self._build_sentence_piece_processor()
+        return self.sp.EncodeAsPieces(line)
+
+    def tokens2text(self, tokens: Iterable[str]) -> str:
+        self._build_sentence_piece_processor()
+        return self.sp.DecodePieces(list(tokens))
+
+
+class LargeDataLoader(AbsIterFactory):
+    def __init__(self, args, mode="train"):
+        symbol_table, seg_dict, punc_dict, bpe_tokenizer = None, None, None, None
+        if hasattr(args, "token_list") and args.token_list is not None:
+            symbol_table = read_symbol_table(args.token_list)
+        if hasattr(args, "seg_dict_file") and args.seg_dict_file is not None:
+            seg_dict = load_seg_dict(args.seg_dict_file)
+        if hasattr(args, "punc_list") and args.punc_list is not None:
+            punc_dict = read_symbol_table(args.punc_list)
+        if hasattr(args, "bpemodel") and args.bpemodel is not None:
+            bpe_tokenizer = SentencepiecesTokenizer(args.bpemodel)
+        self.dataset_conf = args.dataset_conf
+        if "frontend_conf" not in args:
+            self.frontend_conf = None
+        else:
+            self.frontend_conf = args.frontend_conf
+        self.speed_perturb = args.speed_perturb if hasattr(args, "speed_perturb") else None
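+        # Each optional field above falls back to None when absent from
+        # args, so callers may simply omit it.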
         logging.info("dataloader config: {}".format(self.dataset_conf))
-        self.dataset = Dataset(data_list, symbol_table,
-                               self.dataset_conf, mode=mode)
+        batch_mode = self.dataset_conf.get("batch_mode", "padding")
+        data_list = args.train_data_file if mode == "train" else args.valid_data_file
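+        # Speed perturbation is train-only; validation batches are always
+        # built from the unperturbed audio.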
+        self.dataset = Dataset(data_list, symbol_table, seg_dict, punc_dict, bpe_tokenizer,
+                               self.dataset_conf, self.frontend_conf,
+                               speed_perturb=self.speed_perturb if mode == "train" else None,
+                               mode=mode, batch_mode=batch_mode)
 
     def build_iter(self, epoch, shuffle=True):
         self.dataset.set_epoch(epoch)
--
Gitblit v1.9.1