From 28a19dbc4e85d3b8a4ec2ef7483bba64d422b43f Mon Sep 17 00:00:00 2001
From: aky15 <ankeyu.aky@11.17.44.249>
Date: Wed, 12 Apr 2023 18:03:06 +0800
Subject: [PATCH] Merge remote-tracking branch 'origin/main' into dev_aky
---
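Notes (kept below the "---" marker, which `git am` ignores): this merge pulls
the following from origin/main into dev_aky:

- a new --batch_interval option to save the model every N batches within an epoch;
- wav.scp/text consistency filtering (filter_wav_text) before shape calculation;
- a logging-handler reset so each worker's basicConfig() takes effect;
- frontend/tokenization resources (frontend_conf, seg_dict_file, punc_list,
  bpemodel) plumbed into ArkDataLoader for the "large" dataset type;
- a configurable destination sampling rate for ESPnetDataset;
- an mc flag threaded through the streaming data iterator.

The trainer code that consumes --batch_interval is outside this diff; the sketch
below is only an assumed illustration, and train_loader, train_one_step and
save_checkpoint are hypothetical names:

    # hypothetical trainer loop: checkpoint every args.batch_interval updates
    for step, batch in enumerate(train_loader, start=1):
        train_one_step(model, batch)
        if step % args.batch_interval == 0:
            save_checkpoint(model, f"model.step{step}.pb")
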
funasr/tasks/abs_task.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/funasr/tasks/abs_task.py b/funasr/tasks/abs_task.py
index 02311fd..e70b062 100644
--- a/funasr/tasks/abs_task.py
+++ b/funasr/tasks/abs_task.py
@@ -71,7 +71,7 @@
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_int
from funasr.utils.types import str_or_none
-from funasr.utils.wav_utils import calc_shape, generate_data_list
+from funasr.utils.wav_utils import calc_shape, generate_data_list, filter_wav_text
from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
try:
@@ -464,6 +464,12 @@
default=sys.maxsize,
help="The maximum number update step to train",
)
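+ # save an intra-epoch model checkpoint every batch_interval batches
+ # (the trainer code that consumes this option is outside this diff)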
+ group.add_argument(
+     "--batch_interval",
+     type=int,
+     default=10000,
+     help="Save the model every this many training batches.",
+ )
group.add_argument(
"--patience",
type=int_or_none,
@@ -639,12 +645,12 @@
"and exclude_keys excludes keys of model states for the initialization."
"e.g.\n"
" # Load all parameters"
- " --init_param some/where/model.pth\n"
+ " --init_param some/where/model.pb\n"
" # Load only decoder parameters"
- " --init_param some/where/model.pth:decoder:decoder\n"
+ " --init_param some/where/model.pb:decoder:decoder\n"
" # Load only decoder parameters excluding decoder.embed"
- " --init_param some/where/model.pth:decoder:decoder:decoder.embed\n"
- " --init_param some/where/model.pth:decoder:decoder:decoder.embed\n",
+ " --init_param some/where/model.pb:decoder:decoder:decoder.embed\n"
+ " --init_param some/where/model.pb:decoder:decoder:decoder.embed\n",
)
group.add_argument(
"--ignore_init_mismatch",
@@ -1153,6 +1159,14 @@
if args.batch_bins is not None:
args.batch_bins = args.batch_bins * args.ngpu
+ # filter out samples whose wav.scp and text entries do not match
+ if (args.train_shape_file is None and args.dataset_type == "small") or (args.train_data_file is None and args.dataset_type == "large"):
+ if not args.simple_ddp or distributed_option.dist_rank == 0:
+ filter_wav_text(args.data_dir, args.train_set)
+ filter_wav_text(args.data_dir, args.dev_set)
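+ # under simple DDP, non-zero ranks wait at the barrier below until rank 0 finishes filtering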
+ if args.simple_ddp:
+ dist.barrier()
+
if args.train_shape_file is None and args.dataset_type == "small":
if not args.simple_ddp or distributed_option.dist_rank == 0:
calc_shape(args.data_dir, args.train_set, args.frontend_conf, args.speech_length_min, args.speech_length_max)
@@ -1185,12 +1199,18 @@
# logging.basicConfig() is invoked in main_worker() instead of main()
# because it can be invoked only once in a process.
# FIXME(kamo): Should we use logging.getLogger()?
+ # BUGFIX: Remove previous handlers and reset log level
+ for handler in logging.root.handlers[:]:
+ logging.root.removeHandler(handler)
logging.basicConfig(
level=args.log_level,
format=f"[{os.uname()[1].split('.')[0]}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
+ # BUGFIX: Remove previous handlers and reset log level
+ for handler in logging.root.handlers[:]:
+ logging.root.removeHandler(handler)
# Suppress logging if RANK != 0
logging.basicConfig(
level="ERROR",
@@ -1340,12 +1360,16 @@
if args.dataset_type == "large":
from funasr.datasets.large_datasets.build_dataloader import ArkDataLoader
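+ # plumb optional frontend/tokenization resources into the loader;
+ # each falls back to None when args lacks the attribute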
train_iter_factory = ArkDataLoader(args.train_data_file, args.token_list, args.dataset_conf,
- seg_dict_file=args.seg_dict_file if hasattr(args,
- "seg_dict_file") else None,
+ frontend_conf=args.frontend_conf if hasattr(args, "frontend_conf") else None,
+ seg_dict_file=args.seg_dict_file if hasattr(args, "seg_dict_file") else None,
+ punc_dict_file=args.punc_list if hasattr(args, "punc_list") else None,
+ bpemodel_file=args.bpemodel if hasattr(args, "bpemodel") else None,
mode="train")
- valid_iter_factory = ArkDataLoader(args.valid_data_file, args.token_list, args.dataset_conf,
- seg_dict_file=args.seg_dict_file if hasattr(args,
- "seg_dict_file") else None,
+ valid_iter_factory = ArkDataLoader(args.valid_data_file, args.token_list, args.dataset_conf,
+ frontend_conf=args.frontend_conf if hasattr(args, "frontend_conf") else None,
+ seg_dict_file=args.seg_dict_file if hasattr(args, "seg_dict_file") else None,
+ punc_dict_file=args.punc_list if hasattr(args, "punc_list") else None,
+ bpemodel_file=args.bpemodel if hasattr(args, "bpemodel") else None,
mode="eval")
elif args.dataset_type == "small":
train_iter_factory = cls.build_iter_factory(
@@ -1558,12 +1582,23 @@
) -> AbsIterFactory:
assert check_argument_types()
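+ # pick the destination sampling rate handed to ESPnetDataset below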
+ if hasattr(args, "frontend_conf"):
+ if args.frontend_conf is not None and "fs" in args.frontend_conf:
+ dest_sample_rate = args.frontend_conf["fs"]
+ else:
+ dest_sample_rate = 16000
+
dataset = ESPnetDataset(
iter_options.data_path_and_name_and_type,
float_dtype=args.train_dtype,
preprocess=iter_options.preprocess_fn,
max_cache_size=iter_options.max_cache_size,
max_cache_fd=iter_options.max_cache_fd,
+ dest_sample_rate=dest_sample_rate,
)
cls.check_task_requirements(
dataset, args.allow_variable_data_keys, train=iter_options.train
@@ -1835,6 +1870,7 @@
key_file: str = None,
batch_size: int = 1,
fs: dict = None,
+ mc: bool = False,
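+ # note: mc is new in this patch and is simply forwarded, alongside fs, in the call further below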
dtype: str = np.float32,
num_workers: int = 1,
allow_variable_data_keys: bool = False,
@@ -1853,6 +1889,7 @@
data_path_and_name_and_type,
float_dtype=dtype,
fs=fs,
+ mc=mc,
preprocess=preprocess_fn,
key_file=key_file,
)
--
Gitblit v1.9.1