From 2a66366be4c2715870e4859fd5a5db6e8a9dc00a Mon Sep 17 00:00:00 2001
From: chenmengzheAAA <123789350+chenmengzheAAA@users.noreply.github.com>
Date: Thu, 14 Sep 2023 19:00:17 +0800
Subject: [PATCH] Merge pull request #956 from alibaba-damo-academy/chenmengzheAAA-patch-4
---
funasr/bin/asr_train.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/funasr/bin/asr_train.py b/funasr/bin/asr_train.py
index 38a42b3..8161e7b 100755
--- a/funasr/bin/asr_train.py
+++ b/funasr/bin/asr_train.py
@@ -36,6 +36,8 @@
from funasr.tasks.asr import ASRTaskParaformer as ASRTask
if args.mode == "uniasr":
from funasr.tasks.asr import ASRTaskUniASR as ASRTask
+ if args.mode == "rnnt":
+ from funasr.tasks.asr import ASRTransducerTask as ASRTask
ASRTask.main(args=args, cmd=cmd)
@@ -44,7 +46,8 @@
args = parse_args()
# setup local gpu_id
- os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
+ if args.ngpu > 0:
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
# DDP settings
if args.ngpu > 1:
@@ -55,9 +58,9 @@
# re-compute batch size: when dataset type is small
if args.dataset_type == "small":
- if args.batch_size is not None:
+ if args.batch_size is not None and args.ngpu > 0:
args.batch_size = args.batch_size * args.ngpu
- if args.batch_bins is not None:
+ if args.batch_bins is not None and args.ngpu > 0:
args.batch_bins = args.batch_bins * args.ngpu
main(args=args)
--
Gitblit v1.9.1