From 9be8a443d74d68f179de88fff13b4e8424579d7b Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Fri, 10 Mar 2023 18:24:39 +0800
Subject: [PATCH] Merge pull request #207 from alibaba-damo-academy/dev_dzh
---
funasr/tasks/abs_task.py | 65 +++++++++++++++++++++++++++++++-
1 file changed, 63 insertions(+), 2 deletions(-)
diff --git a/funasr/tasks/abs_task.py b/funasr/tasks/abs_task.py
index 7899400..723a67c 100644
--- a/funasr/tasks/abs_task.py
+++ b/funasr/tasks/abs_task.py
@@ -43,6 +43,7 @@
from funasr.iterators.chunk_iter_factory import ChunkIterFactory
from funasr.iterators.multiple_iter_factory import MultipleIterFactory
from funasr.iterators.sequence_iter_factory import SequenceIterFactory
+from funasr.main_funcs.collect_stats import collect_stats
from funasr.optimizers.sgd import SGD
from funasr.optimizers.fairseq_adam import FairseqAdam
from funasr.samplers.build_batch_sampler import BATCH_TYPES
@@ -70,7 +71,7 @@
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_int
from funasr.utils.types import str_or_none
-from funasr.utils.wav_utils import calc_shape, generate_data_list
+from funasr.utils.wav_utils import calc_shape, generate_data_list, filter_wav_text
from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
try:
@@ -1152,6 +1153,14 @@
if args.batch_bins is not None:
args.batch_bins = args.batch_bins * args.ngpu
+ # filter out samples whose wav.scp and text entries do not match
+ if (args.train_shape_file is None and args.dataset_type == "small") or (args.train_data_file is None and args.dataset_type == "large"):
+ if not args.simple_ddp or distributed_option.dist_rank == 0:
+ filter_wav_text(args.data_dir, args.train_set)
+ filter_wav_text(args.data_dir, args.dev_set)
+ if args.simple_ddp:
+ dist.barrier()
+
if args.train_shape_file is None and args.dataset_type == "small":
if not args.simple_ddp or distributed_option.dist_rank == 0:
calc_shape(args.data_dir, args.train_set, args.frontend_conf, args.speech_length_min, args.speech_length_max)
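
For context, the new guard runs filter_wav_text over the train and dev sets before shape
calculation, so that entries present in only one of wav.scp and text are dropped. A minimal
sketch of such a filter, assuming Kaldi-style files keyed by utterance ID (the actual
funasr.utils.wav_utils.filter_wav_text may differ):

    import os

    def filter_wav_text_sketch(data_dir, dataset):
        """Keep only utterance IDs present in both wav.scp and text."""
        wav_path = os.path.join(data_dir, dataset, "wav.scp")
        text_path = os.path.join(data_dir, dataset, "text")

        def read_scp(path):
            entries = {}
            with open(path, encoding="utf-8") as f:
                for line in f:
                    parts = line.strip().split(maxsplit=1)
                    if len(parts) == 2:
                        entries[parts[0]] = parts[1]
            return entries

        wav, text = read_scp(wav_path), read_scp(text_path)
        common = sorted(wav.keys() & text.keys())  # intersection of utt IDs

        with open(wav_path, "w", encoding="utf-8") as f:
            f.writelines(f"{k} {wav[k]}\n" for k in common)
        with open(text_path, "w", encoding="utf-8") as f:
            f.writelines(f"{k} {text[k]}\n" for k in common)
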
@@ -1272,6 +1281,52 @@
if args.dry_run:
pass
+ elif args.collect_stats:
+ # Run in collect_stats mode. This mode has two roles:
+ # - Derive the length and dimension of all input data
+ # - Accumulate feature sums, squared values, and lengths for whitening
+
+ if args.valid_batch_size is None:
+ args.valid_batch_size = args.batch_size
+
+ if len(args.train_shape_file) != 0:
+ train_key_file = args.train_shape_file[0]
+ else:
+ train_key_file = None
+ if len(args.valid_shape_file) != 0:
+ valid_key_file = args.valid_shape_file[0]
+ else:
+ valid_key_file = None
+
+ collect_stats(
+ model=model,
+ train_iter=cls.build_streaming_iterator(
+ data_path_and_name_and_type=args.train_data_path_and_name_and_type,
+ key_file=train_key_file,
+ batch_size=args.batch_size,
+ dtype=args.train_dtype,
+ num_workers=args.num_workers,
+ allow_variable_data_keys=args.allow_variable_data_keys,
+ ngpu=args.ngpu,
+ preprocess_fn=cls.build_preprocess_fn(args, train=False),
+ collate_fn=cls.build_collate_fn(args, train=False),
+ ),
+ valid_iter=cls.build_streaming_iterator(
+ data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
+ key_file=valid_key_file,
+ batch_size=args.valid_batch_size,
+ dtype=args.train_dtype,
+ num_workers=args.num_workers,
+ allow_variable_data_keys=args.allow_variable_data_keys,
+ ngpu=args.ngpu,
+ preprocess_fn=cls.build_preprocess_fn(args, train=False),
+ collate_fn=cls.build_collate_fn(args, train=False),
+ ),
+ output_dir=output_dir,
+ ngpu=args.ngpu,
+ log_interval=args.log_interval,
+ write_collected_feats=args.write_collected_feats,
+ )
else:
logging.info("Training args: {}".format(args))
# 6. Loads pre-trained model
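
The collect_stats branch builds plain streaming iterators (with train=False preprocessing)
and hands them to funasr.main_funcs.collect_stats. A minimal sketch of the whitening
arithmetic such a pass accumulates, assuming global mean/variance normalization is the goal
(the real function also records per-utterance shapes and writes everything to output_dir):

    import numpy as np

    class FeatStats:
        """Sketch of running statistics for global mean/variance whitening."""

        def __init__(self, dim):
            self.sum = np.zeros(dim)      # per-dimension feature sum
            self.sq_sum = np.zeros(dim)   # per-dimension sum of squares
            self.count = 0                # total frames seen

        def update(self, feats):
            # feats: array of shape (num_frames, dim)
            self.sum += feats.sum(axis=0)
            self.sq_sum += (feats ** 2).sum(axis=0)
            self.count += feats.shape[0]

        def mean_std(self):
            mean = self.sum / self.count
            var = self.sq_sum / self.count - mean ** 2
            return mean, np.sqrt(np.maximum(var, 1e-20))
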
@@ -1293,12 +1348,16 @@
if args.dataset_type == "large":
from funasr.datasets.large_datasets.build_dataloader import ArkDataLoader
train_iter_factory = ArkDataLoader(args.train_data_file, args.token_list, args.dataset_conf,
+ frontend_conf=args.frontend_conf if hasattr(args, "frontend_conf") else None,
seg_dict_file=args.seg_dict_file if hasattr(args,
"seg_dict_file") else None,
+ punc_dict_file=args.punc_list if hasattr(args, "punc_list") else None,
mode="train")
- valid_iter_factory = ArkDataLoader(args.valid_data_file, args.token_list, args.dataset_conf,
+ valid_iter_factory = ArkDataLoader(args.valid_data_file, args.token_list, args.dataset_conf,
+ frontend_conf=args.frontend_conf if hasattr(args, "frontend_conf") else None,
seg_dict_file=args.seg_dict_file if hasattr(args,
"seg_dict_file") else None,
+ punc_dict_file=args.punc_list if hasattr(args, "punc_list") else None,
mode="eval")
elif args.dataset_type == "small":
train_iter_factory = cls.build_iter_factory(
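
A small readability note on the hunk above: the repeated `x if hasattr(args, "x") else None`
pattern is equivalent to getattr with a default, e.g.:

    # Equivalent, more compact forms of the conditional kwargs above:
    frontend_conf = getattr(args, "frontend_conf", None)
    punc_dict_file = getattr(args, "punc_list", None)
    seg_dict_file = getattr(args, "seg_dict_file", None)
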
@@ -1788,6 +1847,7 @@
key_file: str = None,
batch_size: int = 1,
fs: dict = None,
+ mc: bool = False,
dtype: str = np.float32,
num_workers: int = 1,
allow_variable_data_keys: bool = False,
@@ -1806,6 +1866,7 @@
data_path_and_name_and_type,
float_dtype=dtype,
fs=fs,
+ mc=mc,
preprocess=preprocess_fn,
key_file=key_file,
)
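
The last two hunks thread a new mc flag from build_streaming_iterator into the dataset
constructor; the patch itself does not document its semantics (multi-channel audio is a
plausible reading, but that is an assumption). A hypothetical call site, with illustrative
names:

    # Hypothetical usage; MyTask stands in for a concrete AbsTask subclass.
    loader = MyTask.build_streaming_iterator(
        data_path_and_name_and_type=[("data/wav.scp", "speech", "sound")],
        key_file=None,
        batch_size=1,
        mc=True,          # new flag, forwarded to the dataset as shown above
        num_workers=1,
    )
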
--
Gitblit v1.9.1