From 2a3336d9a8a9d74662be805510abdd0a29414ae5 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: 星期二, 25 四月 2023 23:06:45 +0800
Subject: [PATCH] update
---
egs/aishell/conformer/run.sh | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/egs/aishell/conformer/run.sh b/egs/aishell/conformer/run.sh
index 09ddab8..60afbec 100755
--- a/egs/aishell/conformer/run.sh
+++ b/egs/aishell/conformer/run.sh
@@ -3,7 +3,7 @@
. ./path.sh || exit 1;
# machines configuration
-CUDA_VISIBLE_DEVICES="0,1"
+CUDA_VISIBLE_DEVICES="2,3"
gpu_num=2
count=1
gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
@@ -13,7 +13,7 @@
infer_cmd=utils/run.pl
# general configuration
-feats_dir="../DATA" #feature output dictionary
+feats_dir="/nfs/wangjiaming.wjm/Funasr_data/aishell-1-fix-cmvn" #feature output dictionary
exp_dir="."
lang=zh
dumpdir=dump/fbank
@@ -21,7 +21,7 @@
token_type=char
scp=feats.scp
type=kaldi_ark
-stage=0
+stage=3
stop_stage=4
# feature configuration
@@ -161,7 +161,8 @@
rank=$i
local_rank=$i
gpu_id=$(echo $CUDA_VISIBLE_DEVICES | cut -d',' -f$[$i+1])
- asr_train.py \
+ train.py \
+ --task_name asr \
--gpu_id $gpu_id \
--use_preprocessor true \
--token_type char \
@@ -177,7 +178,6 @@
--resume true \
--output_dir ${exp_dir}/exp/${model_dir} \
--config $asr_config \
- --input_size $feats_dim \
--ngpu $gpu_num \
--num_worker_count $count \
--multiprocessing_distributed true \
@@ -217,7 +217,7 @@
if [ -n "${inference_config}" ]; then
_opts+="--config ${inference_config} "
fi
- ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1: "${_nj}" "${_logdir}"/asr_inference.JOB.log \
+ ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
python -m funasr.bin.asr_inference_launch \
--batch_size 1 \
--ngpu "${_ngpu}" \
--
Gitblit v1.9.1