From 4ace5a95b052d338947fc88809a440ccd55cf6b4 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 16 Nov 2023 16:39:52 +0800
Subject: [PATCH] funasr pages

---
 funasr/bin/build_trainer.py |   23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/funasr/bin/build_trainer.py b/funasr/bin/build_trainer.py
index 7979d25..bda83ec 100644
--- a/funasr/bin/build_trainer.py
+++ b/funasr/bin/build_trainer.py
@@ -532,11 +532,9 @@
     args = build_args(args, parser, extra_task_params)
 
     if args.local_rank is not None:
-        args.distributed = True
-        args.simple_ddp = True
+        distributed = True
     else:
-        args.distributed = False
-        args.ngpu = 1
+        distributed = False
     args.local_rank = args.local_rank if args.local_rank is not None else 0
     local_rank = args.local_rank
     if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
@@ -550,6 +548,10 @@
     init_param = modelscope_dict['init_model']
     cmvn_file = modelscope_dict['cmvn_file']
     seg_dict_file = modelscope_dict['seg_dict']
+    if 'bpemodel' in modelscope_dict:
+        bpemodel = modelscope_dict['bpemodel']
+    else:
+        bpemodel = None
 
     # overwrite parameters
     with open(config) as f:
@@ -583,6 +585,10 @@
         args.seg_dict_file = seg_dict_file
     else:
         args.seg_dict_file = None
+    if bpemodel is not None and os.path.exists(bpemodel):
+        args.bpemodel = bpemodel
+    else:
+        args.bpemodel = None
     args.data_dir = data_dir
     args.train_set = train_set
     args.dev_set = dev_set
@@ -593,6 +599,12 @@
     args.batch_type = "length"
     args.oss_bucket = None
     args.input_size = None
+    if distributed:
+        args.distributed = True
+        args.simple_ddp = True
+    else:
+        args.distributed = False
+        args.ngpu = 1
     if optim is not None:
         args.optim = optim
     if lr is not None:
@@ -610,6 +622,7 @@
     if batch_bins is not None:
         if args.dataset_type == "small":
             args.batch_bins = batch_bins
+            args.dataset_conf["batch_conf"]["batch_size"] = batch_bins
         elif args.dataset_type == "large":
             args.dataset_conf["batch_conf"]["batch_size"] = batch_bins
         else:
@@ -627,8 +640,6 @@
     torch.backends.cudnn.deterministic = args.cudnn_deterministic
 
     # ddp init
-    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
-    args.distributed = args.ngpu > 1 or args.dist_world_size > 1
     distributed_option = build_distributed(args)
 
     # for logging

--
Gitblit v1.9.1