From ccd4c4d240af6414c86af606e6ad9a01ac52e991 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Sun, 23 Apr 2023 17:47:12 +0800
Subject: [PATCH] train: build task args, model, optimizer, scheduler and dataloaders; dump config
---
funasr/bin/train.py | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 104 insertions(+), 8 deletions(-)
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index dbfebd7..c32a362 100644
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -1,14 +1,26 @@
+import argparse
import logging
import os
import sys
+from io import BytesIO
import torch
+from funasr.build_utils.build_args import build_args
+from funasr.build_utils.build_dataloader import build_dataloader
+from funasr.build_utils.build_distributed import build_distributed
+from funasr.build_utils.build_model import build_model
+from funasr.build_utils.build_optimizer import build_optimizer
+from funasr.build_utils.build_scheduler import build_scheduler
+from funasr.text.phoneme_tokenizer import g2p_choices
+from funasr.torch_utils.model_summary import model_summary
+from funasr.torch_utils.pytorch_version import pytorch_cudnn_version
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils import config_argparse
-from funasr.utils.build_distributed import build_distributed
from funasr.utils.prepare_data import prepare_data
from funasr.utils.types import str2bool
+from funasr.utils.types import str_or_none
+from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
def get_parser():
@@ -25,6 +37,7 @@
help="The number of gpus. 0 indicates CPU mode",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed")
+    parser.add_argument("--task_name", type=str, default="asr", help="Task name used to build task-specific arguments")
# ddp related
parser.add_argument(
@@ -263,6 +276,61 @@
action="append",
default=[],
)
+ parser.add_argument(
+ "--use_preprocessor",
+ type=str2bool,
+ default=True,
+ help="Apply preprocessing to data or not",
+ )
+
+ # most task related
+ parser.add_argument(
+ "--init",
+ type=lambda x: str_or_none(x.lower()),
+ default=None,
+ help="The initialization method",
+ choices=[
+ "chainer",
+ "xavier_uniform",
+ "xavier_normal",
+ "kaiming_uniform",
+ "kaiming_normal",
+ None,
+ ],
+ )
+ parser.add_argument(
+ "--token_list",
+ type=str_or_none,
+ default=None,
+        help="A text file mapping int-id to token",
+ )
+ parser.add_argument(
+ "--token_type",
+ type=str,
+ default="bpe",
+        choices=["bpe", "char", "word", "phn"],
+        help="The token type used to tokenize the text",
+ )
+ parser.add_argument(
+ "--bpemodel",
+ type=str_or_none,
+ default=None,
+        help="The sentencepiece model file",
+ )
+ parser.add_argument(
+ "--cleaner",
+ type=str_or_none,
+ choices=[None, "tacotron", "jaconv", "vietnamese"],
+ default=None,
+ help="Apply text cleaning",
+ )
+ parser.add_argument(
+ "--g2p",
+ type=str_or_none,
+ choices=g2p_choices,
+ default=None,
+ help="Specify g2p method if --token_type=phn",
+ )
# pai related
parser.add_argument(
@@ -321,10 +389,20 @@
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
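+    # build the task-specific arguments and merge them into the CLI namespace
+    # (argparse.Namespace raises a TypeError if the two key sets overlap)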
+ task_args = build_args(args)
+ args = argparse.Namespace(**vars(args), **vars(task_args))
+
+ # set random seed
+ set_all_random_seed(args.seed)
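+    # configure cuDNN behaviour from the corresponding CLI flags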
+ torch.backends.cudnn.enabled = args.cudnn_enabled
+ torch.backends.cudnn.benchmark = args.cudnn_benchmark
+ torch.backends.cudnn.deterministic = args.cudnn_deterministic
# ddp init
args.distributed = args.dist_world_size > 1
distributed_option = build_distributed(args)
+
+ # for logging
if not distributed_option.distributed or distributed_option.dist_rank == 0:
logging.basicConfig(
level="INFO",
@@ -337,14 +415,32 @@
format=f"[{os.uname()[1].split('.')[0]}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
- logging.info("world size: {}, rank: {}, local_rank: {}".format(distributed_option.dist_world_size,
- distributed_option.dist_rank,
- distributed_option.local_rank))
# prepare files for dataloader
prepare_data(args, distributed_option)
- set_all_random_seed(args.seed)
- torch.backends.cudnn.enabled = args.cudnn_enabled
- torch.backends.cudnn.benchmark = args.cudnn_benchmark
- torch.backends.cudnn.deterministic = args.cudnn_deterministic
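+    # build the model, then the optimizers and schedulers that update it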
+ model = build_model(args)
+ optimizers = build_optimizer(args, model=model)
+ schedulers = build_scheduler(args, optimizers)
+
+ logging.info("world size: {}, rank: {}, local_rank: {}".format(distributed_option.dist_world_size,
+ distributed_option.dist_rank,
+ distributed_option.local_rank))
+ logging.info(pytorch_cudnn_version())
+ logging.info(model_summary(model))
+ logging.info("Optimizer: {}".format(optimizers))
+ logging.info("Scheduler: {}".format(schedulers))
+
+ # dump args to config.yaml
+ if not distributed_option.distributed or distributed_option.dist_rank == 0:
+ os.makedirs(args.output_dir, exist_ok=True)
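+        # on PAI the config is serialized with torch.save and uploaded to the
+        # OSS bucket; otherwise it is dumped to a local config.yaml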
+        logging.info("Saving the configuration in {}".format(args.output_dir))
+        if args.use_pai:
+            buffer = BytesIO()
+            torch.save({"config": vars(args)}, buffer)
+            args.oss_bucket.put_object(os.path.join(args.output_dir, "config.dict"), buffer.getvalue())
+        else:
+            with open(os.path.join(args.output_dir, "config.yaml"), "w") as f:
+                yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
+
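+    # build the train/validation dataloaders from the files prepared above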
+ train_dataloader, valid_dataloader = build_dataloader(args)
--
Gitblit v1.9.1