From 4ebde3c4ac27c15ff39ffbd5aa601035d189497a Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 19 Feb 2024 18:42:35 +0800
Subject: [PATCH] aishell example

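Rework the AISHELL Paraformer recipe for the FunASR 1.0 entry points:
stage 4 of run.sh now writes the training log to a file, stage 5 decodes
wav.scp in parallel shards with funasr/bin/inference.py and scores the
merged hypotheses, utils/compute_wer.py is added as the scorer,
finetune.sh comments out the one-off jsonl preparation step, and
infer.sh is renamed to infer_demo.sh.

The scorer can also be run standalone on any ref/hyp pair in Kaldi
text format (file names below are placeholders):

    python utils/compute_wer.py ref.txt hyp.txt wer_details.txt
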
---
 examples/aishell/paraformer/utils/compute_wer.py              |  162 ++++++++++++++++++++++++++
 examples/aishell/paraformer/run.sh                            |  179 +++++++++++++---------------
 examples/industrial_data_pretraining/paraformer/finetune.sh   |   11 -
 examples/industrial_data_pretraining/paraformer/infer_demo.sh |    3 
 4 files changed, 254 insertions(+), 101 deletions(-)

diff --git a/examples/aishell/paraformer/run.sh b/examples/aishell/paraformer/run.sh
index 3f485c2..410751a 100755
--- a/examples/aishell/paraformer/run.sh
+++ b/examples/aishell/paraformer/run.sh
@@ -39,23 +39,14 @@
 valid_set=dev
 test_sets="dev test"
 
-asr_config=train_asr_paraformer_conformer_12e_6d_2048_256.yaml
-model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
+config=train_asr_paraformer_conformer_12e_6d_2048_256.yaml
+model_dir="baseline_$(basename "${config}" .yaml)_${lang}_${token_type}_${tag}"
 
-#inference_config=conf/decode_asr_transformer_noctc_1best.yaml
-#inference_asr_model=valid.acc.ave_10best.pb
+inference_device="cuda" #"cpu"
+inference_checkpoint="model.pt"
+inference_scp="wav.scp"
 
-## you can set gpu num for decoding here
-#gpuid_list=$CUDA_VISIBLE_DEVICES  # set gpus for decoding, the same as training stage by default
-#ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-#
-#if ${gpu_inference}; then
-#    inference_nj=$[${ngpu}*${njob}]
-#    _ngpu=1
-#else
-#    inference_nj=$njob
-#    _ngpu=0
-#fi
+
 
 if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
     echo "stage -1: Data Download"
@@ -85,10 +76,10 @@
 
 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
     echo "stage 1: Feature and CMVN Generation"
-#    utils/compute_cmvn.sh --fbankdir ${feats_dir}/data/${train_set} --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --config_file "$asr_config" --scale 1.0
+#    utils/compute_cmvn.sh --fbankdir ${feats_dir}/data/${train_set} --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --config_file "$config" --scale 1.0
     python ../../../funasr/bin/compute_audio_cmvn.py \
     --config-path "${workspace}" \
-    --config-name "${asr_config}" \
+    --config-name "${config}" \
     ++train_data_set_list="${feats_dir}/data/${train_set}/audio_datasets.jsonl" \
     ++cmvn_file="${feats_dir}/data/${train_set}/cmvn.json" \
     ++dataset_conf.num_workers=$nj
@@ -116,90 +107,88 @@
 
 # ASR Training Stage
 if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
-echo "stage 4: ASR Training"
+  echo "stage 4: ASR Training"
 
+  log_file="${exp_dir}/exp/${model_dir}/train.log.txt"
+  mkdir -p "${exp_dir}/exp/${model_dir}"; echo "log_file: ${log_file}"
   torchrun \
   --nnodes 1 \
   --nproc_per_node ${gpu_num} \
   ../../../funasr/bin/train.py \
   --config-path "${workspace}" \
-  --config-name "${asr_config}" \
+  --config-name "${config}" \
   ++train_data_set_list="${feats_dir}/data/${train_set}/audio_datasets.jsonl" \
-  ++cmvn_file="${feats_dir}/data/${train_set}/am.mvn" \
-  ++token_list="${token_list}" \
-  ++output_dir="${exp_dir}/exp/${model_dir}"
+  ++tokenizer_conf.token_list="${token_list}" \
+  ++frontend_conf.cmvn_file="${feats_dir}/data/${train_set}/am.mvn" \
+  ++output_dir="${exp_dir}/exp/${model_dir}" &> ${log_file}
 fi
 
-#
-## Testing Stage
-#if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
-#    echo "stage 5: Inference"
-#    for dset in ${test_sets}; do
-#        asr_exp=${exp_dir}/exp/${model_dir}
-#        inference_tag="$(basename "${inference_config}" .yaml)"
-#        _dir="${asr_exp}/${inference_tag}/${inference_asr_model}/${dset}"
-#        _logdir="${_dir}/logdir"
-#        if [ -d ${_dir} ]; then
-#            echo "${_dir} is already exists. if you want to decode again, please delete this dir first."
-#            exit 0
-#        fi
-#        mkdir -p "${_logdir}"
-#        _data="${feats_dir}/data/${dset}"
-#        key_file=${_data}/${scp}
-#        num_scp_file="$(<${key_file} wc -l)"
-#        _nj=$([ $inference_nj -le $num_scp_file ] && echo "$inference_nj" || echo "$num_scp_file")
-#        split_scps=
-#        for n in $(seq "${_nj}"); do
-#            split_scps+=" ${_logdir}/keys.${n}.scp"
-#        done
-#        # shellcheck disable=SC2086
-#        utils/split_scp.pl "${key_file}" ${split_scps}
-#        _opts=
-#        if [ -n "${inference_config}" ]; then
-#            _opts+="--config ${inference_config} "
-#        fi
-#        ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
-#            python -m funasr.bin.asr_inference_launch \
-#                --batch_size 1 \
-#                --ngpu "${_ngpu}" \
-#                --njob ${njob} \
-#                --gpuid_list ${gpuid_list} \
-#                --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
-#                --cmvn_file ${feats_dir}/data/${train_set}/cmvn/am.mvn \
-#                --key_file "${_logdir}"/keys.JOB.scp \
-#                --asr_train_config "${asr_exp}"/config.yaml \
-#                --asr_model_file "${asr_exp}"/"${inference_asr_model}" \
-#                --output_dir "${_logdir}"/output.JOB \
-#                --mode paraformer \
-#                ${_opts}
-#
-#        for f in token token_int score text; do
-#            if [ -f "${_logdir}/output.1/1best_recog/${f}" ]; then
-#                for i in $(seq "${_nj}"); do
-#                    cat "${_logdir}/output.${i}/1best_recog/${f}"
-#                done | sort -k1 >"${_dir}/${f}"
-#            fi
-#        done
-#        python utils/proce_text.py ${_dir}/text ${_dir}/text.proc
-#        python utils/proce_text.py ${_data}/text ${_data}/text.proc
-#        python utils/compute_wer.py ${_data}/text.proc ${_dir}/text.proc ${_dir}/text.cer
-#        tail -n 3 ${_dir}/text.cer > ${_dir}/text.cer.txt
-#        cat ${_dir}/text.cer.txt
-#    done
-#fi
-#
-## Prepare files for ModelScope fine-tuning and inference
-#if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
-#    echo "stage 6: ModelScope Preparation"
-#    cp ${feats_dir}/data/${train_set}/cmvn/am.mvn ${exp_dir}/exp/${model_dir}/am.mvn
-#    vocab_size=$(cat ${token_list} | wc -l)
-#    python utils/gen_modelscope_configuration.py \
-#        --am_model_name $inference_asr_model \
-#        --mode paraformer \
-#        --model_name paraformer \
-#        --dataset aishell \
-#        --output_dir $exp_dir/exp/$model_dir \
-#        --vocab_size $vocab_size \
-#        --nat _nat \
-#        --tag $tag
-#fi
\ No newline at end of file
+
+
+# Testing Stage
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+  echo "stage 5: Inference"
+
+  if [ "${inference_device}" == "cuda" ]; then
+      nj=$(echo "${CUDA_VISIBLE_DEVICES}" | awk -F "," '{print NF}')
+  else
+      nj=$njob
+      batch_size=1
+      gpuid_list=""
+      for JOB in $(seq ${nj}); do
+          gpuid_list=${gpuid_list}"-1,"
+      done
+  fi
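+  # nj is the number of parallel decoding jobs: one per entry in
+  # CUDA_VISIBLE_DEVICES on GPU, or ${njob} worker processes on CPU.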
+
+  for dset in ${test_sets}; do
+    asr_exp="${exp_dir}/exp/${model_dir}"
+    inference_dir="${asr_exp}/${inference_checkpoint}/${dset}"
+    _logdir="${inference_dir}/logdir"
+
+    mkdir -p "${_logdir}"
+    data_dir="${feats_dir}/data/${dset}"
+    key_file=${data_dir}/${inference_scp}
+
+    split_scps=
+    for JOB in $(seq "${nj}"); do
+        split_scps+=" ${_logdir}/keys.${JOB}.scp"
+    done
+    utils/split_scp.pl "${key_file}" ${split_scps}
+
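+    # decode the ${nj} key shards in parallel background jobs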
+    for JOB in $(seq ${nj}); do
+        {
+          python ../../../funasr/bin/inference.py \
+          --config-path="${exp_dir}/exp/${model_dir}" \
+          --config-name="config.yaml" \
+          ++init_param="${exp_dir}/exp/${model_dir}/${inference_checkpoint}" \
+          ++tokenizer_conf.token_list="${token_list}" \
+          ++frontend_conf.cmvn_file="${feats_dir}/data/${train_set}/am.mvn" \
+          ++input="${_logdir}/keys.${JOB}.scp" \
+          ++output_dir="${inference_dir}/${JOB}" \
+          ++device="${inference_device}"
+        }&
+
+    done
+    wait
+
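+    # all shards are done; merge per-job outputs into a single 1best_recog/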
+    mkdir -p ${inference_dir}/1best_recog
+    for f in token score text; do
+        if [ -f "${inference_dir}/1/1best_recog/${f}" ]; then
+          for JOB in $(seq "${nj}"); do
+              cat "${inference_dir}/${JOB}/1best_recog/${f}"
+          done | sort -k1 >"${inference_dir}/1best_recog/${f}"
+        fi
+    done
+
+    echo "Computing WER ..."
+    cp ${inference_dir}/1best_recog/text ${inference_dir}/1best_recog/text.proc
+    cp ${data_dir}/text ${inference_dir}/1best_recog/text.ref
+    python utils/compute_wer.py ${inference_dir}/1best_recog/text.ref ${inference_dir}/1best_recog/text.proc ${inference_dir}/1best_recog/text.cer
+    tail -n 3 ${inference_dir}/1best_recog/text.cer
+  done
+
+fi
diff --git a/examples/aishell/paraformer/utils/compute_wer.py b/examples/aishell/paraformer/utils/compute_wer.py
new file mode 100755
index 0000000..26a9f49
--- /dev/null
+++ b/examples/aishell/paraformer/utils/compute_wer.py
@@ -0,0 +1,162 @@
+import sys
+
+import numpy as np
+
+def compute_wer(ref_file,
+                hyp_file,
+                cer_detail_file):
+    rst = {
+        'Wrd': 0,
+        'Corr': 0,
+        'Ins': 0,
+        'Del': 0,
+        'Sub': 0,
+        'Snt': 0,
+        'Err': 0.0,
+        'S.Err': 0.0,
+        'wrong_words': 0,
+        'wrong_sentences': 0
+    }
+
+    hyp_dict = {}
+    ref_dict = {}
+    with open(hyp_file, 'r', encoding='utf-8') as hyp_reader:
+        for line in hyp_reader:
+            key = line.strip().split()[0]
+            value = line.strip().split()[1:]
+            hyp_dict[key] = value
+    with open(ref_file, 'r', encoding='utf-8') as ref_reader:
+        for line in ref_reader:
+            key = line.strip().split()[0]
+            value = line.strip().split()[1:]
+            ref_dict[key] = value
+
+    cer_detail_writer = open(cer_detail_file, 'w', encoding='utf-8')
+    for hyp_key in hyp_dict:
+        if hyp_key in ref_dict:
+            out_item = compute_wer_by_line(hyp_dict[hyp_key], ref_dict[hyp_key])
+            rst['Wrd'] += out_item['nwords']
+            rst['Corr'] += out_item['cor']
+            rst['wrong_words'] += out_item['wrong']
+            rst['Ins'] += out_item['ins']
+            rst['Del'] += out_item['del']
+            rst['Sub'] += out_item['sub']
+            rst['Snt'] += 1
+            if out_item['wrong'] > 0:
+                rst['wrong_sentences'] += 1
+            cer_detail_writer.write(hyp_key + print_cer_detail(out_item) + '\n')
+            cer_detail_writer.write("ref:" + '\t' + " ".join(list(map(lambda x: x.lower(), ref_dict[hyp_key]))) + '\n')
+            cer_detail_writer.write("hyp:" + '\t' + " ".join(list(map(lambda x: x.lower(), hyp_dict[hyp_key]))) + '\n')
+
+    if rst['Wrd'] > 0:
+        rst['Err'] = round(rst['wrong_words'] * 100 / rst['Wrd'], 2)
+    if rst['Snt'] > 0:
+        rst['S.Err'] = round(rst['wrong_sentences'] * 100 / rst['Snt'], 2)
+
+    cer_detail_writer.write('\n')
+    cer_detail_writer.write("%WER " + str(rst['Err']) + " [ " + str(rst['wrong_words'])+ " / " + str(rst['Wrd']) +
+                            ", " + str(rst['Ins']) + " ins, " + str(rst['Del']) + " del, " + str(rst['Sub']) + " sub ]" + '\n')
+    cer_detail_writer.write("%SER " + str(rst['S.Err']) + " [ " + str(rst['wrong_sentences']) + " / " + str(rst['Snt']) + " ]" + '\n')
+    cer_detail_writer.write("Scored " + str(len(hyp_dict)) + " sentences, " + str(len(hyp_dict) - rst['Snt']) + " not present in hyp." + '\n')
+
+
+def compute_wer_by_line(hyp,
+                        ref):
+    hyp = list(map(lambda x: x.lower(), hyp))
+    ref = list(map(lambda x: x.lower(), ref))
+
+    len_hyp = len(hyp)
+    len_ref = len(ref)
+
+    cost_matrix = np.zeros((len_hyp + 1, len_ref + 1), dtype=np.int16)
+
+    ops_matrix = np.zeros((len_hyp + 1, len_ref + 1), dtype=np.int8)
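+    # cost_matrix[i][j] is the edit distance between hyp[:i] and ref[:j];
+    # ops_matrix records which operation produced each cell so the
+    # alignment can be traced back (0: match, 1: sub, 2: ins, 3: del).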
+
+    for i in range(len_hyp + 1):
+        cost_matrix[i][0] = i
+    for j in range(len_ref + 1):
+        cost_matrix[0][j] = j
+
+    for i in range(1, len_hyp + 1):
+        for j in range(1, len_ref + 1):
+            if hyp[i - 1] == ref[j - 1]:
+                cost_matrix[i][j] = cost_matrix[i - 1][j - 1]
+            else:
+                substitution = cost_matrix[i - 1][j - 1] + 1
+                insertion = cost_matrix[i - 1][j] + 1
+                deletion = cost_matrix[i][j - 1] + 1
+
+                compare_val = [substitution, insertion, deletion]
+
+                min_val = min(compare_val)
+                operation_idx = compare_val.index(min_val) + 1
+                cost_matrix[i][j] = min_val
+                ops_matrix[i][j] = operation_idx
+
+    match_idx = []
+    i = len_hyp
+    j = len_ref
+    rst = {
+        'nwords': len_ref,
+        'cor': 0,
+        'wrong': 0,
+        'ins': 0,
+        'del': 0,
+        'sub': 0
+    }
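+    # walk back from (len_hyp, len_ref) toward (0, 0), tallying operations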
+    while i >= 0 or j >= 0:
+        i_idx = max(0, i)
+        j_idx = max(0, j)
+
+        if ops_matrix[i_idx][j_idx] == 0:  # correct
+            if i - 1 >= 0 and j - 1 >= 0:
+                match_idx.append((j - 1, i - 1))
+                rst['cor'] += 1
+
+            i -= 1
+            j -= 1
+
+        elif ops_matrix[i_idx][j_idx] == 2:  # insert
+            i -= 1
+            rst['ins'] += 1
+
+        elif ops_matrix[i_idx][j_idx] == 3:  # delete
+            j -= 1
+            rst['del'] += 1
+
+        elif ops_matrix[i_idx][j_idx] == 1:  # substitute
+            i -= 1
+            j -= 1
+            rst['sub'] += 1
+
+        if i < 0 and j >= 0:
+            rst['del'] += 1
+        elif j < 0 and i >= 0:
+            rst['ins'] += 1
+
+    match_idx.reverse()
+    wrong_cnt = cost_matrix[len_hyp][len_ref]
+    rst['wrong'] = wrong_cnt
+
+    return rst
+
+def print_cer_detail(rst):
+    n = max(rst['nwords'], 1)  # avoid division by zero on empty references
+    return ("(" + "nwords=" + str(rst['nwords']) + ",cor=" + str(rst['cor'])
+            + ",ins=" + str(rst['ins']) + ",del=" + str(rst['del']) + ",sub="
+            + str(rst['sub']) + ") corr:" + '{:.2%}'.format(rst['cor']/n) + ",cer:" + '{:.2%}'.format(rst['wrong']/n))
+
+if __name__ == '__main__':
+    if len(sys.argv) != 4:
+        print("usage: python compute_wer.py test.ref test.hyp test.wer")
+        sys.exit(1)
+
+    ref_file = sys.argv[1]
+    hyp_file = sys.argv[2]
+    cer_detail_file = sys.argv[3]
+    compute_wer(ref_file, hyp_file, cer_detail_file)
diff --git a/examples/industrial_data_pretraining/paraformer/finetune.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
index 394861b..8bdd8da 100644
--- a/examples/industrial_data_pretraining/paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -6,10 +6,10 @@
 #git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
 
 ## generate jsonl from wav.scp and text.txt
-python funasr/datasets/audio_datasets/scp2jsonl.py \
-++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+#python funasr/datasets/audio_datasets/scp2jsonl.py \
+#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
+#++data_type_list='["source", "target"]' \
+#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
 
 
 # torchrun \
@@ -24,5 +24,4 @@
 ++dataset_conf.batch_type="example" \
 ++train_conf.max_epoch=2 \
 ++dataset_conf.num_workers=4 \
-+output_dir="outputs/debug/ckpt/funasr2/exp2" \
-+debug="true"
\ No newline at end of file
++output_dir="outputs/debug/ckpt/funasr2/exp2"
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/paraformer/infer.sh b/examples/industrial_data_pretraining/paraformer/infer_demo.sh
similarity index 98%
rename from examples/industrial_data_pretraining/paraformer/infer.sh
rename to examples/industrial_data_pretraining/paraformer/infer_demo.sh
index 7491e98..f9a03f9 100644
--- a/examples/industrial_data_pretraining/paraformer/infer.sh
+++ b/examples/industrial_data_pretraining/paraformer/infer_demo.sh
@@ -9,3 +9,6 @@
 +output_dir="./outputs/debug" \
 +device="cpu" \
 
+
+
+

--
Gitblit v1.9.1