examples/industrial_data_pretraining/lcbnet/demo2.sh
@@ -1,4 +1,4 @@
-file_dir=./exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch
+file_dir="/nfs/yufan.yf/workspace/github/FunASR/examples/industrial_data_pretraining/lcbnet/exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch"
 
 python -m funasr.bin.inference \
@@ -9,6 +9,7 @@
 ++frontend_conf.cmvn_file=${file_dir}/am.mvn \
 ++input=${file_dir}/wav.scp \
 ++input=${file_dir}/ocr_text \
+++data_type='["sound", "text"]' \
 ++tokenizer_conf.bpemodel=${file_dir}/bpe.model \
 ++output_dir="./outputs/debug" \
 ++device="" \