From 95fc10961fa66b66ed92cdedc961801bb2330030 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Thu, 29 Jun 2023 14:31:44 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main
---
funasr/build_utils/build_asr_model.py | 5 +
funasr/version.txt | 2
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/demo.py | 12 ++
egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py | 3
funasr/runtime/html5/static/index.html | 4
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.sh | 105 +++++++++++++++++++++
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/utils | 1
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py | 1
tests/test_tp_pipeline.py | 30 ++++++
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py | 53 ----------
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/README.md | 3
funasr/runtime/html5/static/main.js | 30 +++++
funasr/models/e2e_asr_transducer.py | 6 +
13 files changed, 192 insertions(+), 63 deletions(-)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/README.md b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/README.md
index eff933e..9a84f9b 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/README.md
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/README.md
@@ -41,8 +41,7 @@
- Modify inference related parameters in `infer_after_finetune.py`
- <strong>output_dir:</strong> # result dir
- <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` also exists, the CER will be computed
- - <strong>decoding_model_name:</strong> # set the checkpoint name for decoding, e.g., `valid.cer_ctc.ave
- .pb`
+ - <strong>decoding_model_name:</strong> # set the checkpoint name for decoding, e.g., `valid.cer_ctc.ave.pb`
- Then you can run the pipeline to finetune with:
```python
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/demo.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/demo.py
new file mode 100644
index 0000000..7ca7118
--- /dev/null
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/demo.py
@@ -0,0 +1,12 @@
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+decoding_mode="normal" #fast, normal, offline
+inference_pipeline = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model='damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online',
+ param_dict={"decoding_model": decoding_mode}
+)
+
+rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
+print(rec_result)
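
For reference, the `decoding_model` entry of `param_dict` selects one of the three documented modes. A minimal sketch (assuming the same public model id and demo clip as in `demo.py` above) that compares all three:

```python
# Minimal sketch: run the demo clip under each documented decoding mode.
# Assumes modelscope + funasr are installed; model id and URL are from demo.py.
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

AUDIO = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'

for mode in ("fast", "normal", "offline"):
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model='damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online',
        param_dict={"decoding_model": mode},
    )
    print(mode, inference_pipeline(audio_in=AUDIO))
```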
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
deleted file mode 100644
index 876d51c..0000000
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import os
-import shutil
-from multiprocessing import Pool
-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-from funasr.utils.compute_wer import compute_wer
-
-
-def modelscope_infer_core(output_dir, split_dir, njob, idx):
- output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
- gpu_id = (int(idx) - 1) // njob
- if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
- gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
- else:
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
- inference_pipeline = pipeline(
- task=Tasks.auto_speech_recognition,
- model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
- output_dir=output_dir_job,
- batch_size=1
- )
- audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
- inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
-
-
-def modelscope_infer(params):
- # prepare for multi-GPU decoding
- ngpu = params["ngpu"]
- njob = params["njob"]
- output_dir = params["output_dir"]
- if os.path.exists(output_dir):
- shutil.rmtree(output_dir)
- os.mkdir(output_dir)
- split_dir = os.path.join(output_dir, "split")
- os.mkdir(split_dir)
- nj = ngpu * njob
- wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
- with open(wav_scp_file) as f:
- lines = f.readlines()
- num_lines = len(lines)
- num_job_lines = num_lines // nj
- start = 0
- for i in range(nj):
- end = start + num_job_lines
- file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
- with open(file, "w") as f:
- if i == nj - 1:
- f.writelines(lines[start:])
- else:
- f.writelines(lines[start:end])
- start = end
-
- p = Pool(nj)
- for i in range(nj):
- p.apply_async(modelscope_infer_core,
- args=(output_dir, split_dir, njob, str(i + 1)))
- p.close()
- p.join()
-
- # combine decoding results
- best_recog_path = os.path.join(output_dir, "1best_recog")
- os.mkdir(best_recog_path)
- files = ["text", "token", "score"]
- for file in files:
- with open(os.path.join(best_recog_path, file), "w") as f:
- for i in range(nj):
- job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
- with open(job_file) as f_job:
- lines = f_job.readlines()
- f.writelines(lines)
-
- # If text exists, compute CER
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(best_recog_path, "text")
- compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
-
-
-if __name__ == "__main__":
- params = {}
- params["data_dir"] = "./data/test"
- params["output_dir"] = "./results"
- params["ngpu"] = 1
- params["njob"] = 1
- modelscope_infer(params)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
new file mode 120000
index 0000000..128fc31
--- /dev/null
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
@@ -0,0 +1 @@
+../../TEMPLATE/infer.py
\ No newline at end of file
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.sh b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.sh
new file mode 100644
index 0000000..2d7a2da
--- /dev/null
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=1
+gpu_inference=false # whether to perform gpu decoding
+gpuid_list="-1" # set gpus, e.g., gpuid_list="0,1"
+njob=32 # number of parallel CPU decoding jobs (used when gpu_inference=false)
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+decoding_mode="normal"
+
+. utils/parse_options.sh || exit 1;
+
+if [ "${gpu_inference}" == "true" ]; then
+ nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+ nj=$njob
+ batch_size=1
+ gpuid_list=""
+ for JOB in $(seq ${nj}); do
+ gpuid_list=$gpuid_list"-1,"
+ done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+ split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ -n "${checkpoint_dir}" ]; then
+ python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+ model=${checkpoint_dir}/${model}
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+ echo "Decoding ..."
+ gpuid_list_array=(${gpuid_list//,/ })
+ for JOB in $(seq ${nj}); do
+ {
+ id=$((JOB-1))
+ gpuid=${gpuid_list_array[$id]}
+ mkdir -p ${output_dir}/output.$JOB
+ python infer.py \
+ --model ${model} \
+ --audio_in ${output_dir}/split/wav.$JOB.scp \
+ --output_dir ${output_dir}/output.$JOB \
+ --batch_size ${batch_size} \
+ --gpuid ${gpuid} \
+ --decoding_mode ${decoding_mode}
+ }&
+ done
+ wait
+
+ mkdir -p ${output_dir}/1best_recog
+ for f in token score text; do
+ if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+ for i in $(seq "${nj}"); do
+ cat "${output_dir}/output.${i}/1best_recog/${f}"
+ done | sort -k1 >"${output_dir}/1best_recog/${f}"
+ fi
+ done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+ echo "Computing WER ..."
+ cp ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+ cp ${data_dir}/text ${output_dir}/1best_recog/text.ref
+ python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+ tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
+ echo "SpeechIO TIOBE textnorm"
+ echo "$0 --> Normalizing REF text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${data_dir}/text \
+ ${output_dir}/1best_recog/ref.txt
+
+ echo "$0 --> Normalizing HYP text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${output_dir}/1best_recog/text.proc \
+ ${output_dir}/1best_recog/rec.txt
+ grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
+
+ echo "$0 --> computing WER/CER and alignment ..."
+ ./utils/error_rate_zh \
+ --tokenizer char \
+ --ref ${output_dir}/1best_recog/ref.txt \
+ --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
+ ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
+ rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
+fi
+
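Stage 1 of `infer.sh` shards `wav.scp` with `utils/split_scp.pl` and runs one background `infer.py` process per shard. A hedged Python equivalent of that split-and-decode loop, using only the `infer.py` flags shown in the script (the round-robin sharding here is an approximation, not `split_scp.pl`'s exact policy):

```python
# Illustrative Python equivalent of infer.sh stage 1 (split + parallel decode).
import subprocess
from pathlib import Path

nj = 4  # illustrative; the script derives nj from gpuid_list or njob
lines = Path("./data/test/wav.scp").read_text().splitlines(keepends=True)
Path("./results/split").mkdir(parents=True, exist_ok=True)

procs = []
for job in range(1, nj + 1):
    shard = Path(f"./results/split/wav.{job}.scp")
    shard.write_text("".join(lines[job - 1::nj]))  # simple round-robin shard
    out_dir = Path(f"./results/output.{job}")
    out_dir.mkdir(parents=True, exist_ok=True)
    procs.append(subprocess.Popen([
        "python", "infer.py",
        "--model", "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
        "--audio_in", str(shard),
        "--output_dir", str(out_dir),
        "--batch_size", "1",
        "--gpuid", "-1",  # -1 => CPU decoding, mirroring gpu_inference=false
        "--decoding_mode", "normal",
    ]))
for p in procs:
    p.wait()
```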
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py
deleted file mode 100644
index fd124ff..0000000
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import json
-import os
-import shutil
-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-from funasr.utils.compute_wer import compute_wer
-
-
-def modelscope_infer_after_finetune(params):
- # prepare for decoding
- pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
- for file_name in params["required_files"]:
- if file_name == "configuration.json":
- with open(os.path.join(pretrained_model_path, file_name)) as f:
- config_dict = json.load(f)
- config_dict["model"]["am_model_name"] = params["decoding_model_name"]
- with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
- json.dump(config_dict, f, indent=4, separators=(',', ': '))
- else:
- shutil.copy(os.path.join(pretrained_model_path, file_name),
- os.path.join(params["output_dir"], file_name))
- decoding_path = os.path.join(params["output_dir"], "decode_results")
- if os.path.exists(decoding_path):
- shutil.rmtree(decoding_path)
- os.mkdir(decoding_path)
-
- # decoding
- inference_pipeline = pipeline(
- task=Tasks.auto_speech_recognition,
- model=params["output_dir"],
- output_dir=decoding_path,
- batch_size=1
- )
- audio_in = os.path.join(params["data_dir"], "wav.scp")
- inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
-
- # computer CER if GT text is set
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(decoding_path, "1best_recog/text")
- compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
-
-
-if __name__ == '__main__':
- params = {}
- params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
- params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
- params["output_dir"] = "./checkpoint"
- params["data_dir"] = "./data/test"
- params["decoding_model_name"] = "20epoch.pb"
- modelscope_infer_after_finetune(params)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/utils b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/utils
new file mode 120000
index 0000000..2ac163f
--- /dev/null
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/utils
@@ -0,0 +1 @@
+../../../../egs/aishell/transformer/utils
\ No newline at end of file
diff --git a/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py b/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py
index 3116f6d..581f7aa 100644
--- a/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py
+++ b/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py
@@ -4,8 +4,7 @@
inference_pipeline = pipeline(
task=Tasks.speech_timestamp,
model='damo/speech_timestamp_prediction-v1-16k-offline',
- model_revision='v1.1.0',
- output_dir=None)
+ model_revision='v1.1.0')
rec_result = inference_pipeline(
audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
diff --git a/funasr/build_utils/build_asr_model.py b/funasr/build_utils/build_asr_model.py
index 200395d..a76b204 100644
--- a/funasr/build_utils/build_asr_model.py
+++ b/funasr/build_utils/build_asr_model.py
@@ -408,10 +408,15 @@
**args.model_conf,
)
elif args.model == "timestamp_prediction":
+ # predictor
+ predictor_class = predictor_choices.get_class(args.predictor)
+ predictor = predictor_class(**args.predictor_conf)
+
model_class = model_choices.get_class(args.model)
model = model_class(
frontend=frontend,
encoder=encoder,
+ predictor=predictor,
token_list=token_list,
**args.model_conf,
)
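
The timestamp-prediction branch now builds its predictor the same way the other components are built: the name in `args.predictor` is resolved through a choices table and instantiated from `args.predictor_conf`. A self-contained sketch of that registry pattern, with hypothetical class and option names:

```python
# Hypothetical, self-contained sketch of the choices/registry pattern used in
# build_asr_model.py; CifPredictor and its options are illustrative names only.
class CifPredictor:
    def __init__(self, idim=512, threshold=1.0):
        self.idim = idim
        self.threshold = threshold

class ClassChoices:
    def __init__(self, classes):
        self.classes = classes  # maps a config name to a class
    def get_class(self, name):
        return self.classes[name]

predictor_choices = ClassChoices({"cif_predictor": CifPredictor})

# mirrors: predictor_class = predictor_choices.get_class(args.predictor)
#          predictor = predictor_class(**args.predictor_conf)
predictor = predictor_choices.get_class("cif_predictor")(idim=512, threshold=1.0)
print(type(predictor).__name__)
```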
diff --git a/funasr/models/e2e_asr_transducer.py b/funasr/models/e2e_asr_transducer.py
index 3f9f31c..4e33bd6 100644
--- a/funasr/models/e2e_asr_transducer.py
+++ b/funasr/models/e2e_asr_transducer.py
@@ -7,7 +7,9 @@
import torch
from packaging.version import parse as V
from typeguard import check_argument_types
-
+from funasr.losses.label_smoothing_loss import (
+ LabelSmoothingLoss, # noqa: H301
+)
from funasr.models.frontend.abs_frontend import AbsFrontend
from funasr.models.specaug.abs_specaug import AbsSpecAug
from funasr.models.decoder.rnnt_decoder import RNNTDecoder
@@ -15,6 +17,8 @@
from funasr.models.encoder.abs_encoder import AbsEncoder
from funasr.models.joint_net.joint_network import JointNetwork
from funasr.modules.nets_utils import get_transducer_task_io
+from funasr.modules.nets_utils import th_accuracy
+from funasr.modules.add_sos_eos import add_sos_eos
from funasr.layers.abs_normalize import AbsNormalize
from funasr.torch_utils.device_funcs import force_gatherable
from funasr.models.base_model import FunASRModel
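
The three new imports are the usual ingredients of an auxiliary attention/CE objective in ESPnet-style models: `add_sos_eos` prepares decoder inputs and targets, `LabelSmoothingLoss` scores the logits, and `th_accuracy` reports token accuracy. A hedged sketch of that conventional wiring (shapes, values, and hyperparameters are illustrative; this hunk only adds the imports):

```python
# Hedged sketch: typical use of the newly imported helpers for an auxiliary
# attention-decoder loss. decoder_out stands in for real decoder logits.
import torch
from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
from funasr.modules.add_sos_eos import add_sos_eos
from funasr.modules.nets_utils import th_accuracy

vocab_size, ignore_id = 3445, -1
sos = eos = vocab_size - 1

ys_pad = torch.tensor([[5, 7, 9]])  # padded target token ids (B, Lmax)
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, sos, eos, ignore_id)

decoder_out = torch.randn(1, ys_out_pad.size(1), vocab_size)  # logits (B, L, V)
criterion_att = LabelSmoothingLoss(size=vocab_size, padding_idx=ignore_id, smoothing=0.1)
loss_att = criterion_att(decoder_out, ys_out_pad)
acc_att = th_accuracy(decoder_out.view(-1, vocab_size), ys_out_pad, ignore_label=ignore_id)
print(loss_att.item(), acc_att)
```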
diff --git a/funasr/runtime/html5/static/index.html b/funasr/runtime/html5/static/index.html
index 23d6fec..4943fc8 100644
--- a/funasr/runtime/html5/static/index.html
+++ b/funasr/runtime/html5/static/index.html
@@ -19,7 +19,9 @@
<div class="div_class_recordControl">
ASR server address (required):
<br>
- <input id="wssip" type="text" style=" width: 100%;height:100%" value="wss://127.0.0.1:10095/"/>
+ <input id="wssip" type="text" onchange="addresschange()" style=" width: 100%;height:100%" value="wss://127.0.0.1:10095/"/>
+ <br>
+ <a id="wsslink" href="#" onclick="window.open('https://127.0.0.1:10095/', '_blank')"><div id="info_wslink">鐐规澶勬墜宸ユ巿鏉僿ss://127.0.0.1:10095/</div></a>
<br>
<br>
<div style="border:2px solid #ccc;">
diff --git a/funasr/runtime/html5/static/main.js b/funasr/runtime/html5/static/main.js
index 04d22a9..4a50801 100644
--- a/funasr/runtime/html5/static/main.js
+++ b/funasr/runtime/html5/static/main.js
@@ -31,6 +31,9 @@
btnConnect= document.getElementById('btnConnect');
btnConnect.onclick = start;
+
+var awsslink= document.getElementById('wsslink');
+
var rec_text=""; // for online rec asr result
var offline_text=""; // for offline rec asr result
@@ -45,6 +48,27 @@
var totalsend=0;
+
+var now_ipaddress=window.location.href;
+now_ipaddress=now_ipaddress.replace("https://","wss://");
+now_ipaddress=now_ipaddress.replace("static/index.html","");
+var localport=window.location.port;
+now_ipaddress=now_ipaddress.replace(localport,"10095");
+document.getElementById('wssip').value=now_ipaddress;
+addresschange();
+function addresschange()
+{
+
+ var Uri = document.getElementById('wssip').value;
+ document.getElementById('info_wslink').innerHTML="Click here to authorize manually (iOS devices)";
+ Uri=Uri.replace(/wss/g,"https");
+ console.log("addresschange uri=",Uri);
+
+ awsslink.onclick=function(){
+ window.open(Uri, '_blank');
+ }
+
+}
upfile.onclick=function()
{
btnStart.disabled = true;
@@ -77,7 +101,7 @@
var audio_record = document.getElementById('audio_record');
audio_record.src = (window.URL||webkitURL).createObjectURL(audioblob);
audio_record.controls=true;
- audio_record.play();
+ //audio_record.play(); //not auto play
}
function start_file_send()
{
@@ -223,7 +247,7 @@
stop();
console.log( 'connecttion error' );
- alert("杩炴帴鍦板潃"+document.getElementById('wssip').value+"澶辫触,璇锋鏌sr鍦板潃鍜岀鍙o紝骞剁‘淇漢5鏈嶅姟鍜宎sr鏈嶅姟鍦ㄥ悓涓�涓煙鍐呫�傛垨鎹釜娴忚鍣ㄨ瘯璇曘��");
+ alert("杩炴帴鍦板潃"+document.getElementById('wssip').value+"澶辫触,璇锋鏌sr鍦板潃鍜岀鍙c�傛垨璇曡瘯鐣岄潰涓婃墜鍔ㄦ巿鏉冿紝鍐嶈繛鎺ャ��");
btnStart.disabled = true;
btnStop.disabled = true;
btnConnect.disabled=false;
@@ -329,7 +353,7 @@
var audio_record = document.getElementById('audio_record');
audio_record.src = (window.URL||webkitURL).createObjectURL(theblob);
audio_record.controls=true;
- audio_record.play();
+ //audio_record.play();
} ,function(msg){
diff --git a/funasr/version.txt b/funasr/version.txt
index ef5e445..05e8a45 100644
--- a/funasr/version.txt
+++ b/funasr/version.txt
@@ -1 +1 @@
-0.6.5
+0.6.6
diff --git a/tests/test_tp_pipeline.py b/tests/test_tp_pipeline.py
new file mode 100644
index 0000000..07084f2
--- /dev/null
+++ b/tests/test_tp_pipeline.py
@@ -0,0 +1,30 @@
+import unittest
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from modelscope.utils.logger import get_logger
+
+logger = get_logger()
+
+class TestTimestampPredictionPipelines(unittest.TestCase):
+ def test_funasr_path(self):
+ import funasr
+ import os
+ logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))
+
+ def test_inference_pipeline(self):
+ inference_pipeline = pipeline(
+ task=Tasks.speech_timestamp,
+ model='damo/speech_timestamp_prediction-v1-16k-offline',
+ model_revision='v1.1.0')
+
+ rec_result = inference_pipeline(
+ audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
+ text_in='一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢',)
+ print(rec_result)
+ logger.info("punctuation inference result: {0}".format(rec_result))
+ assert rec_result == {'text': '<sil> 0.000 0.380;一 0.380 0.560;个 0.560 0.800;东 0.800 0.980;太 0.980 1.140;平 1.140 1.260;洋 1.260 1.440;国 1.440 1.680;家 1.680 1.920;<sil> 1.920 2.040;为 2.040 2.200;什 2.200 2.320;么 2.320 2.500;跑 2.500 2.680;到 2.680 2.860;西 2.860 3.040;太 3.040 3.200;平 3.200 3.380;洋 3.380 3.500;来 3.500 3.640;了 3.640 3.800;呢 3.800 4.150;<sil> 4.150 4.440;', 'timestamp': [[380, 560], [560, 800], [800, 980], [980, 1140], [1140, 1260], [1260, 1440], [1440, 1680], [1680, 1920], [2040, 2200], [2200, 2320], [2320, 2500], [2500, 2680], [2680, 2860], [2860, 3040], [3040, 3200], [3200, 3380], [3380, 3500], [3500, 3640], [3640, 3800], [3800, 4150]]}
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
--
Gitblit v1.9.1