import os

from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils.nested_dict_action import NestedDictAction
from funasr.utils.prepare_data import prepare_data
from funasr.utils.types import int_or_none
from funasr.utils.types import str2bool
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_none
from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump

)
parser.add_argument(
    "--dist_world_size",
    default=1,
    type=int,
    help="number of nodes for distributed training",
)
parser.add_argument(
    "--dist_rank",
    type=int,
    default=None,
    help="node rank for distributed training",
)
parser.add_argument(
    "--local_rank",
    type=int,
    default=None,
    help="local rank for distributed training",
)
parser.add_argument(
    "--dist_master_addr",
    default=None,
    type=str_or_none,
    help="The master address for distributed training. "
    "This value is used when dist_init_method == 'env://'",
)
parser.add_argument(
    "--dist_master_port",
    default=None,
    type=int_or_none,
    help="The master port for distributed training. "
    "This value is used when dist_init_method == 'env://'",
)
parser.add_argument(
    "--dist_launcher",
    default=None,
    type=str_or_none,
    choices=["slurm", "mpi", None],
    help="The launcher type for distributed training",
)
parser.add_argument(
    "--multiprocessing_distributed",
    default=True,
    type=str2bool,
    help="Use multi-processing distributed training to launch "
    "N processes per node, which has N GPUs. This is the "
    "fastest way to use PyTorch for either single node or "
    "multi node data parallel training",
)
parser.add_argument(
    "--unused_parameters",
    type=str2bool,
    default=False,
    help="Whether to use the find_unused_parameters in "
    "torch.nn.parallel.DistributedDataParallel",
)
parser.add_argument(
    "--dataset_type",
    type=str,
    default="small",
    help="whether to use dataloader for large dataset",
)
parser.add_argument(
    "--dataset_conf",
    action=NestedDictAction,
    default=dict(),
    help="The keyword arguments for dataset",
)
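# NestedDictAction merges repeated flags into one (possibly nested) dict,
# e.g. --dataset_conf batch_size=64 --dataset_conf filter.max_length=2000;
# the keys and values here are illustrative, not options the dataset requires.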
parser.add_argument(
    "--train_data_file",
    type=str,
    default=None,
    help="path to the training data file",
)
parser.add_argument(
    "--train_data_path_and_name_and_type",
    type=str2triple_str,
    action="append",
    default=[],
    help="e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'.",
)
parser.add_argument(
    "--valid_data_path_and_name_and_type",
    type=str2triple_str,
    action="append",
    default=[],
)
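# Each --*_data_path_and_name_and_type is parsed by str2triple_str into a
# (path, name, type) triple, e.g. "some/path/a.scp,foo,sound" ->
# ("some/path/a.scp", "foo", "sound"); action="append" collects repeated
# flags into a list, so several data sources can be given per split.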
parser.add_argument(
    "--train_shape_file",
    type=str,
    action="append",
    default=[],
)
parser.add_argument(
    "--valid_shape_file",
    type=str,
    action="append",
    default=[],
)
args, extra_task_params = parser.parse_known_args()
if extra_task_params:
    args = build_args(args, parser, extra_task_params)
# args = argparse.Namespace(**vars(args), **vars(task_args))
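
# Illustrative invocation; the script name and all values are placeholders,
# only the flags defined above are real:
#   python train.py \
#       --dist_world_size 2 --dist_rank 0 --dist_master_port 29500 \
#       --train_data_path_and_name_and_type some/path/a.scp,foo,sound \
#       --valid_data_path_and_name_and_type some/path/b.scp,foo,sound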

# set random seed
set_all_random_seed(args.seed)
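# set_all_random_seed presumably seeds Python's random module, NumPy, and
# torch (CPU and CUDA) so that runs are reproducible; exact coverage is
# assumed from the helper's name rather than shown here.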

# ddp init
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
args.distributed = args.ngpu > 1 or args.dist_world_size > 1
distributed_option = build_distributed(args)
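# build_distributed is assumed to consume the dist_* options above and
# initialize the torch.distributed process group (rank, world size, init
# method), returning an option object used by the trainer.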

# for logging