From 189b51d42bd29032091f1e29ae5585eb52c0af57 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Mon, 24 Apr 2023 18:58:37 +0800
Subject: [PATCH] train: pin one GPU per worker via --gpu_id; scale
 batch_bins by ngpu

Add a --gpu_id argument to funasr/bin/train.py and export it as
CUDA_VISIBLE_DEVICES before DDP init, so each worker process sees a
single local device. Scale LengthBatchSampler's batch_bins by args.ngpu
so the global batch grows with the number of GPUs, and remove the
--multiprocessing_distributed flag from the aishell paraformer recipe.

---
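Notes:

The --gpu_id change relies on CUDA_VISIBLE_DEVICES being set before the
process creates its CUDA context. A minimal sketch of the intended
behavior, assuming a machine with at least three GPUs (the id 2 is just
an example, not part of this patch):

    import os

    # Must be set before the first CUDA call; torch creates its CUDA
    # context lazily, so doing this at startup is sufficient.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"   # e.g. --gpu_id 2

    import torch

    # The process now sees exactly one device, and "cuda:0" maps to
    # physical GPU 2.
    print(torch.cuda.device_count())           # -> 1
    x = torch.zeros(1, device="cuda:0")        # lands on physical GPU 2
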
 egs/aishell/paraformer/run.sh                           |    1 -
 funasr/bin/train.py                                     |    7 +++++++
 funasr/datasets/small_datasets/sequence_iter_factory.py |    2 +-
 3 files changed, 8 insertions(+), 2 deletions(-)
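
The batch_bins scaling keeps the per-rank batch unchanged while the
global batch grows with world size, assuming the iter factory shards
each global batch evenly across the ngpu ranks and treating batch_bins
as a sentence count for simplicity (numbers are illustrative):

    batch_size = 32                  # dataset_conf["batch_size"]
    ngpu = 4                         # args.ngpu

    batch_bins = batch_size * ngpu   # global batch built by LengthBatchSampler
    per_rank = batch_bins // ngpu    # share consumed by each DDP rank
    assert per_rank == batch_size    # per-rank batch size is unchanged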

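An aside on pre-existing code visible in the last hunk's context (not
changed by this patch): hasattr() on the dataset_conf dict tests
attributes, not keys, so the "sort_in_batch"/"sort_batch" fallbacks
always fire; a follow-up could switch to the "in" operator:

    conf = {"sort_in_batch": "ascending"}
    print(hasattr(conf, "sort_in_batch"))   # False: attribute lookup on a dict
    print("sort_in_batch" in conf)          # True: key lookup
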
diff --git a/egs/aishell/paraformer/run.sh b/egs/aishell/paraformer/run.sh
index 60078a0..ed6ddfe 100755
--- a/egs/aishell/paraformer/run.sh
+++ b/egs/aishell/paraformer/run.sh
@@ -180,7 +180,6 @@
                 --input_size $feats_dim \
                 --ngpu $gpu_num \
                 --num_worker_count $count \
-                --multiprocessing_distributed true \
                 --dist_init_method $init_method \
                 --dist_world_size $world_size \
                 --dist_rank $rank \
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index 474e857..7a9986c 100755
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -77,6 +77,12 @@
         help="Whether to use the find_unused_parameters in "
              "torch.nn.parallel.DistributedDataParallel ",
     )
+    parser.add_argument(
+        "--gpu_id",
+        type=int,
+        default=0,
+        help="local gpu id.",
+    )
 
     # cudnn related
     parser.add_argument(
@@ -399,6 +405,7 @@
     torch.backends.cudnn.deterministic = args.cudnn_deterministic
 
     # ddp init
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
     args.distributed = args.dist_world_size > 1
     distributed_option = build_distributed(args)
 
diff --git a/funasr/datasets/small_datasets/sequence_iter_factory.py b/funasr/datasets/small_datasets/sequence_iter_factory.py
index 7078943..810389e 100644
--- a/funasr/datasets/small_datasets/sequence_iter_factory.py
+++ b/funasr/datasets/small_datasets/sequence_iter_factory.py
@@ -62,7 +62,7 @@
         # sampler
         dataset_conf = args.dataset_conf
         batch_sampler = LengthBatchSampler(
-            batch_bins=dataset_conf["batch_size"],
+            batch_bins=dataset_conf["batch_size"] * args.ngpu,
             shape_files=shape_files,
             sort_in_batch=dataset_conf["sort_in_batch"] if hasattr(dataset_conf, "sort_in_batch") else "descending",
             sort_batch=dataset_conf["sort_batch"] if hasattr(dataset_conf, "sort_batch") else "ascending",

--
Gitblit v1.9.1