From 189b51d42bd29032091f1e29ae5585eb52c0af57 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: 星期一, 24 四月 2023 18:58:37 +0800
Subject: [PATCH] update
---
funasr/bin/train.py | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index 1518071..7a9986c 100755
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -19,7 +19,6 @@
from funasr.torch_utils.model_summary import model_summary
from funasr.torch_utils.pytorch_version import pytorch_cudnn_version
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
-from funasr.utils import config_argparse
from funasr.utils.prepare_data import prepare_data
from funasr.utils.types import str2bool
from funasr.utils.types import str_or_none
@@ -27,7 +26,7 @@
def get_parser():
- parser = config_argparse.ArgumentParser(
+ parser = argparse.ArgumentParser(
description="FunASR Common Training Parser",
)
@@ -77,6 +76,12 @@
default=False,
help="Whether to use the find_unused_parameters in "
"torch.nn.parallel.DistributedDataParallel ",
+ )
+ parser.add_argument(
+ "--gpu_id",
+ type=int,
+ default=0,
+ help="local gpu id.",
)
# cudnn related
@@ -388,9 +393,10 @@
if __name__ == '__main__':
parser = get_parser()
- args = parser.parse_args()
- task_args = build_args(args)
- args = argparse.Namespace(**vars(args), **vars(task_args))
+ args, extra_task_params = parser.parse_known_args()
+ if extra_task_params:
+ args = build_args(args, parser, extra_task_params)
+ # args = argparse.Namespace(**vars(args), **vars(task_args))
# set random seed
set_all_random_seed(args.seed)
@@ -399,6 +405,7 @@
torch.backends.cudnn.deterministic = args.cudnn_deterministic
# ddp init
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
args.distributed = args.dist_world_size > 1
distributed_option = build_distributed(args)
--
Gitblit v1.9.1