aky15
2023-05-17 429995f4d007adba9099196a868c65a5b6cf14a5
Merge branch 'dev_infer' of https://github.com/alibaba-damo-academy/FunASR into dev_infer
36 files changed, 288 lines modified
 egs/aishell/conformer/conf/train_asr_conformer.yaml | 4
 egs/aishell/conformer/run.sh | 1
 egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml | 2
 egs/aishell/data2vec_paraformer_finetune/run.sh | 1
 egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml | 2
 egs/aishell/data2vec_transformer_finetune/run.sh | 1
 egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml | 2
 egs/aishell/paraformer/run.sh | 1
 egs/aishell/paraformerbert/local/extract_embeds.sh | 2
 egs/aishell/paraformerbert/run.sh | 2
 egs/aishell/transformer/conf/train_asr_transformer.yaml | 2
 egs/aishell/transformer/run.sh | 1
 egs/aishell2/conformer/conf/train_asr_conformer.yaml | 1
 egs/aishell2/conformer/run.sh | 3
 egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml | 4
 egs/aishell2/data2vec_pretrain/run.sh | 3
 egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml | 1
 egs/aishell2/paraformer/run.sh | 1
 egs/aishell2/paraformerbert/local/extract_embeds.sh | 2
 egs/aishell2/paraformerbert/run.sh | 2
 egs/aishell2/transformer/conf/train_asr_transformer.yaml | 1
 egs/aishell2/transformer/run.sh | 1
 egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml | 1
 egs/librispeech/conformer/run.sh | 2
 egs/librispeech_100h/conformer/run.sh | 2
 egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py | 8
 egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py | 8
 egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py | 4
 funasr/bin/asr_infer.py | 17
 funasr/bin/diar_inference_launch.py | 3
 funasr/bin/sv_infer.py | 1
 funasr/bin/sv_inference_launch.py | 3
 funasr/bin/train.py | 6
 funasr/models/encoder/sanm_encoder.py | 11
 funasr/models/predictor/cif.py | 5
 funasr/utils/prepare_data.py | 177
egs/aishell/conformer/conf/train_asr_conformer.yaml
@@ -83,6 +83,8 @@
     num_time_mask: 2
 dataset_conf:
+    data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
         shuffle_size: 2048
@@ -93,4 +95,4 @@
     num_workers: 8
 log_interval: 50
-normalize: None
\ No newline at end of file
+normalize: None
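Across the recipes in this commit, the two new dataset_conf keys declare one named data stream per input file: data_names labels the streams and data_types tells the loader how to read each one (sound for wav.scp entries, text for transcripts, kaldi_ark for precomputed features). The streams pair up positionally with the files passed via the new --data_file_names flag below; a minimal sketch of that pairing (parse_streams is a hypothetical helper, not FunASR API):

    # Hypothetical illustration of how the comma-separated config values align;
    # not part of FunASR itself.
    def parse_streams(data_names: str, data_types: str, data_file_names: str):
        names = data_names.split(",")
        types = data_types.split(",")
        files = data_file_names.split(",")
        assert len(names) == len(types) == len(files), "streams must align 1:1"
        return list(zip(files, names, types))

    # the conformer recipe above:
    print(parse_streams("speech,text", "sound,text", "wav.scp,text"))
    # [('wav.scp', 'speech', 'sound'), ('text', 'text', 'text')]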
egs/aishell/conformer/run.sh
@@ -135,6 +135,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --resume true \
egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml
@@ -105,6 +105,8 @@
   r_order: 1
 dataset_conf:
+    data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
         shuffle_size: 2048
egs/aishell/data2vec_paraformer_finetune/run.sh
@@ -139,6 +139,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --init_param ${init_param} \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --resume true \
egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml
@@ -96,6 +96,8 @@
     num_time_mask: 2
 dataset_conf:
+    data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
         shuffle_size: 2048
egs/aishell/data2vec_transformer_finetune/run.sh
@@ -139,6 +139,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --init_param ${init_param} \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml
@@ -93,6 +93,8 @@
     tail_threshold: 0.45
 dataset_conf:
+    data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
         shuffle_size: 2048
egs/aishell/paraformer/run.sh
@@ -135,6 +135,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --resume true \
egs/aishell/paraformerbert/local/extract_embeds.sh
@@ -54,6 +54,8 @@
             cat ${local_records_dir}/embeds.${JOB}.shape || exit 1;
         done > ${local_scp_dir_raw}/embeds.shape
     fi
+
+    cp ${local_scp_dir_raw}/embeds.scp  ${raw_dataset_path}/data/${data_set}/embeds.scp
 done

 echo "embeds is in: ${local_scp_dir_raw}"
egs/aishell/paraformerbert/run.sh
@@ -146,7 +146,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
-                --embed_path ${feats_dir}/data \
+                --data_file_names "wav.scp,text,embed.scp" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --resume true \
egs/aishell/transformer/conf/train_asr_transformer.yaml
@@ -73,6 +73,8 @@
     warmup_steps: 25000
 dataset_conf:
+    data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
         shuffle_size: 2048
egs/aishell/transformer/run.sh
@@ -135,6 +135,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --resume true \
egs/aishell2/conformer/conf/train_asr_conformer.yaml
@@ -84,6 +84,7 @@
     num_time_mask: 2
 dataset_conf:
     data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
egs/aishell2/conformer/run.sh
@@ -103,8 +103,6 @@
     utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/${train_set}/text | cut -f 2- -d" " | tr " " "\n" \
         | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
     echo "<unk>" >> ${token_list}
-    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${train_set}
-    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}
 fi

 # LM Training Stage
@@ -139,6 +137,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --dataset_type $dataset_type \
egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml
@@ -72,8 +72,8 @@
 # for dataset
 dataset_conf:
     batch_mode: clipping
-    data_names: speech,none
-    data_types: sound,none
+    data_names: speech
+    data_types: sound
     shuffle: true
     shuffle_conf:
         shuffle_size: 12800
egs/aishell2/data2vec_pretrain/run.sh
@@ -82,8 +82,6 @@
     utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/${train_set}/text | cut -f 2- -d" " | tr " " "\n" \
         | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
     echo "<unk>" >> ${token_list}
-    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${train_set}
-    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}
 fi

 # Training Stage
@@ -110,6 +108,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --dataset_type $dataset_type \
egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml
@@ -94,6 +94,7 @@
   r_order: 1
 dataset_conf:
     data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
egs/aishell2/paraformer/run.sh
@@ -137,6 +137,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --dataset_type $dataset_type \
egs/aishell2/paraformerbert/local/extract_embeds.sh
@@ -54,6 +54,8 @@
             cat ${local_records_dir}/embeds.${JOB}.shape || exit 1;
         done > ${local_scp_dir_raw}/embeds.shape
     fi
+
+    cp ${local_scp_dir_raw}/embeds.scp  ${raw_dataset_path}/data/${data_set}/embeds.scp
 done

 echo "embeds is in: ${local_scp_dir_raw}"
egs/aishell2/paraformerbert/run.sh
@@ -147,7 +147,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
-                --embed_path ${feats_dir}/data \
+                --data_file_names "wav.scp,text,embed.scp" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --dataset_type $dataset_type \
egs/aishell2/transformer/conf/train_asr_transformer.yaml
@@ -78,6 +78,7 @@
     num_time_mask: 2
 dataset_conf:
     data_names: speech,text
+    data_types: sound,text
     shuffle: True
     shuffle_conf:
egs/aishell2/transformer/run.sh
@@ -137,6 +137,7 @@
                 --data_dir ${feats_dir}/data \
                 --train_set ${train_set} \
                 --valid_set ${valid_set} \
+                --data_file_names "wav.scp,text" \
                 --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
                 --speed_perturb ${speed_perturb} \
                 --dataset_type $dataset_type \
egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml
@@ -43,7 +43,6 @@
   pooling_type: statistic
   num_nodes_resnet1: 256
   num_nodes_last_layer: 256
-  batchnorm_momentum: 0.5
 # decoder related
 decoder: sa_decoder
egs/librispeech/conformer/run.sh
@@ -55,7 +55,7 @@
 inference_config=conf/decode_asr_transformer.yaml
 #inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
-inference_asr_model=valid.acc.ave_10best.pth
+inference_asr_model=valid.acc.ave_10best.pb
 # you can set gpu num for decoding here
 gpuid_list=$CUDA_VISIBLE_DEVICES  # set gpus for decoding, the same as training stage by default
egs/librispeech_100h/conformer/run.sh
@@ -55,7 +55,7 @@
 inference_config=conf/decode_asr_transformer.yaml
 #inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
-inference_asr_model=valid.acc.ave_10best.pth
+inference_asr_model=valid.acc.ave_10best.pb
 # you can set gpu num for decoding here
 gpuid_list=$CUDA_VISIBLE_DEVICES  # set gpus for decoding, the same as training stage by default
egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py
@@ -7,8 +7,9 @@
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks

-# 初始化推理 pipeline
-# 当以原始音频作为输入时使用配置文件 sond.yaml,并设置 mode 为sond_demo
+# initialize the pipeline for inference
+# when using the raw waveform files to inference, please use the config file `sond.yaml`
+# and set mode to `sond_demo`
 inference_diar_pipline = pipeline(
     mode="sond_demo",
     num_workers=0,
@@ -19,7 +20,8 @@
     sv_model_revision="master",
 )

-# 以 audio_list 作为输入,其中第一个音频为待检测语音,后面的音频为不同说话人的声纹注册语音
+# use audio_list as the input, where the first one is the record to be detected
+# and the following files are enrollments for different speakers
 audio_list = [
     "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/record.wav",
     "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/spk_A.wav",
egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py
@@ -7,8 +7,9 @@
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks

-# 初始化推理 pipeline
-# 当以原始音频作为输入时使用配置文件 sond.yaml,并设置 mode 为sond_demo
+# initialize the pipeline for inference
+# when using the raw waveform files to inference, please use the config file `sond.yaml`
+# and set mode to `sond_demo`
 inference_diar_pipline = pipeline(
     mode="sond_demo",
     num_workers=0,
@@ -19,7 +20,8 @@
     sv_model_revision="master",
 )

-# 以 audio_list 作为输入,其中第一个音频为待检测语音,后面的音频为不同说话人的声纹注册语音
+# use audio_list as the input, where the first one is the record to be detected
+# and the following files are enrollments for different speakers
 audio_list = [
     "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/speaker_diarization/record.wav",
     "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/speaker_diarization/spk1.wav",
egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py
@@ -7,13 +7,13 @@
         model='damo/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch'
     )

-    # 两个语音为相同说话人
+    # the same speaker
     rec_result = inference_sv_pipline(audio_in=(
         'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_enroll.wav',
         'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_same.wav'))
     print("Similarity", rec_result["scores"])

-    # 两个语音为不同说话人
+    # different speaker
     rec_result = inference_sv_pipline(audio_in=(
         'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_enroll.wav',
         'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_different.wav'))
funasr/bin/asr_infer.py
@@ -762,23 +762,6 @@
                 feats_len = speech_lengths
             if feats.shape[1] != 0:
-                if cache_en["is_final"]:
-                    if feats.shape[1] + cache_en["chunk_size"][2] < cache_en["chunk_size"][1]:
-                        cache_en["last_chunk"] = True
-                    else:
-                        # first chunk
-                        feats_chunk1 = feats[:, :cache_en["chunk_size"][1], :]
-                        feats_len = torch.tensor([feats_chunk1.shape[1]])
-                        results_chunk1 = self.infer(feats_chunk1, feats_len, cache)
-                        # last chunk
-                        cache_en["last_chunk"] = True
-                        feats_chunk2 = feats[:, -(feats.shape[1] + cache_en["chunk_size"][2] - cache_en["chunk_size"][1]):, :]
-                        feats_len = torch.tensor([feats_chunk2.shape[1]])
-                        results_chunk2 = self.infer(feats_chunk2, feats_len, cache)
-                        return [" ".join(results_chunk1 + results_chunk2)]
                 results = self.infer(feats, feats_len, cache)
         return results
funasr/bin/diar_inference_launch.py
@@ -38,7 +38,6 @@
 from scipy.signal import medfilt
 from funasr.utils.cli_utils import get_commandline_args
 from funasr.tasks.diar import DiarTask
-from funasr.tasks.asr import ASRTask
 from funasr.tasks.diar import EENDOLADiarTask
 from funasr.torch_utils.device_funcs import to_device
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
@@ -187,7 +186,7 @@
                 raise TypeError("raw_inputs must be a list or tuple in [speech, profile1, profile2, ...] ")
         else:
             # 3. Build data-iterator
-            loader = ASRTask.build_streaming_iterator(
+            loader = DiarTask.build_streaming_iterator(
                 data_path_and_name_and_type,
                 dtype=dtype,
                 batch_size=batch_size,
funasr/bin/sv_infer.py
@@ -23,7 +23,6 @@
 from funasr.utils.cli_utils import get_commandline_args
 from funasr.tasks.sv import SVTask
-from funasr.tasks.asr import ASRTask
 from funasr.torch_utils.device_funcs import to_device
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
 from funasr.utils import config_argparse
funasr/bin/sv_inference_launch.py
@@ -34,7 +34,6 @@
 from funasr.utils.cli_utils import get_commandline_args
 from funasr.tasks.sv import SVTask
-from funasr.tasks.asr import ASRTask
 from funasr.torch_utils.device_funcs import to_device
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
 from funasr.utils import config_argparse
@@ -115,7 +114,7 @@
             data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]

         # 3. Build data-iterator
-        loader = ASRTask.build_streaming_iterator(
+        loader = SVTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,
             batch_size=batch_size,
funasr/bin/train.py
@@ -335,6 +335,12 @@
         help="dev dataset",
     )
     parser.add_argument(
+        "--data_file_names",
+        type=str,
+        default="wav.scp,text",
+        help="input data files",
+    )
+    parser.add_argument(
         "--speed_perturb",
         type=float,
         nargs="+",
funasr/models/encoder/sanm_encoder.py
@@ -355,18 +355,9 @@
     def _add_overlap_chunk(self, feats: np.ndarray, cache: dict = {}):
         if len(cache) == 0:
             return feats
-        # process last chunk
         cache["feats"] = to_device(cache["feats"], device=feats.device)
         overlap_feats = torch.cat((cache["feats"], feats), dim=1)
-        if cache["is_final"]:
-            cache["feats"] = overlap_feats[:, -cache["chunk_size"][0]:, :]
-            if not cache["last_chunk"]:
-                padding_length = sum(cache["chunk_size"]) - overlap_feats.shape[1]
-                overlap_feats = overlap_feats.transpose(1, 2)
-                overlap_feats = F.pad(overlap_feats, (0, padding_length))
-                overlap_feats = overlap_feats.transpose(1, 2)
-        else:
-            cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
+        cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
         return overlap_feats

     def forward_chunk(self,
funasr/models/predictor/cif.py
@@ -221,13 +221,14 @@
         if cache is not None and "chunk_size" in cache:
             alphas[:, :cache["chunk_size"][0]] = 0.0
-            alphas[:, sum(cache["chunk_size"][:2]):] = 0.0
+            if "is_final" in cache and not cache["is_final"]:
+                alphas[:, sum(cache["chunk_size"][:2]):] = 0.0
         if cache is not None and "cif_alphas" in cache and "cif_hidden" in cache:
             cache["cif_hidden"] = to_device(cache["cif_hidden"], device=hidden.device)
             cache["cif_alphas"] = to_device(cache["cif_alphas"], device=alphas.device)
             hidden = torch.cat((cache["cif_hidden"], hidden), dim=1)
             alphas = torch.cat((cache["cif_alphas"], alphas), dim=1)
-        if cache is not None and "last_chunk" in cache and cache["last_chunk"]:
+        if cache is not None and "is_final" in cache and cache["is_final"]:
             tail_hidden = torch.zeros((batch_size, 1, hidden_size), device=hidden.device)
             tail_alphas = torch.tensor([[self.tail_threshold]], device=alphas.device)
             tail_alphas = torch.tile(tail_alphas, (batch_size, 1))
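The predictor now keys tail handling off is_final rather than a separate last_chunk flag: when the stream ends, one extra frame carrying tail_threshold of weight is appended so the integrate-and-fire loop can still emit the final token. A self-contained sketch of that idea in plain Python (hypothetical weights; the real implementation is the cif.py code above):

    # Minimal integrate-and-fire sketch: accumulate per-frame weights (alphas)
    # and fire a token boundary whenever the running sum crosses 1.0.
    def cif_fire_points(alphas, threshold=1.0):
        fires, acc = [], 0.0
        for t, a in enumerate(alphas):
            acc += a
            if acc >= threshold:
                fires.append(t)
                acc -= threshold
        return fires

    chunk = [0.8, 0.8]                      # hypothetical predictor weights
    print(cif_fire_points(chunk))           # [1]; a residual of 0.6 is left over
    # on the final chunk, a tail weight (tail_threshold, 0.45 above) is appended
    # so a large residual still yields the last token:
    print(cif_fire_points(chunk + [0.45]))  # [1, 2]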
funasr/utils/prepare_data.py
@@ -3,6 +3,7 @@
 import shutil
 from multiprocessing import Pool

+import kaldiio
 import numpy as np
 import torch.distributed as dist
 import torchaudio
@@ -48,49 +49,80 @@
 def calc_shape_core(root_path, args, idx):
-    wav_scp_file = os.path.join(root_path, "wav.scp.{}".format(idx))
-    shape_file = os.path.join(root_path, "speech_shape.{}".format(idx))
-    with open(wav_scp_file) as f:
+    file_name = args.data_file_names.split(",")[0]
+    data_name = args.dataset_conf.get("data_names", "speech,text").split(",")[0]
+    scp_file = os.path.join(root_path, "{}.{}".format(file_name, idx))
+    shape_file = os.path.join(root_path, "{}_shape.{}".format(data_name, idx))
+    with open(scp_file) as f:
         lines = f.readlines()
-    frontend_conf = args.frontend_conf
-    dataset_conf = args.dataset_conf
-    speech_length_min = dataset_conf.speech_length_min if hasattr(dataset_conf, "speech_length_min") else -1
-    speech_length_max = dataset_conf.speech_length_max if hasattr(dataset_conf, "speech_length_max") else -1
-    with open(shape_file, "w") as f:
-        for line in lines:
-            sample_name, wav_path = line.strip().split()
-            n_frames, feature_dim = wav2num_frame(wav_path, frontend_conf)
-            write_flag = True
-            if n_frames > 0 and speech_length_min > 0:
-                write_flag = n_frames >= speech_length_min
-            if n_frames > 0 and speech_length_max > 0:
-                write_flag = n_frames <= speech_length_max
-            if write_flag:
-                f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
+    data_type = args.dataset_conf.get("data_types", "sound,text").split(",")[0]
+    if data_type == "sound":
+        frontend_conf = args.frontend_conf
+        dataset_conf = args.dataset_conf
+        length_min = dataset_conf.speech_length_min if hasattr(dataset_conf, "{}_length_min".format(data_name)) else -1
+        length_max = dataset_conf.speech_length_max if hasattr(dataset_conf, "{}_length_max".format(data_name)) else -1
+        with open(shape_file, "w") as f:
+            for line in lines:
+                sample_name, wav_path = line.strip().split()
+                n_frames, feature_dim = wav2num_frame(wav_path, frontend_conf)
+                write_flag = True
+                if n_frames > 0 and length_min > 0:
+                    write_flag = n_frames >= length_min
+                if n_frames > 0 and length_max > 0:
+                    write_flag = n_frames <= length_max
+                if write_flag:
+                    f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
+                    f.flush()
+    elif data_type == "kaldi_ark":
+        dataset_conf = args.dataset_conf
+        length_min = dataset_conf.speech_length_min if hasattr(dataset_conf, "{}_length_min".format(data_name)) else -1
+        length_max = dataset_conf.speech_length_max if hasattr(dataset_conf, "{}_length_max".format(data_name)) else -1
+        with open(shape_file, "w") as f:
+            for line in lines:
+                sample_name, feature_path = line.strip().split()
+                feature = kaldiio.load_mat(feature_path)
+                n_frames, feature_dim = feature.shape
+                write_flag = True
+                if n_frames > 0 and length_min > 0:
+                    write_flag = n_frames >= length_min
+                if n_frames > 0 and length_max > 0:
+                    write_flag = n_frames <= length_max
+                if write_flag:
+                    f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
+                    f.flush()
+    elif data_type == "text":
+        with open(shape_file, "w") as f:
+            for line in lines:
+                sample_name, text = line.strip().split(maxsplit=1)
+                n_tokens = len(text.split())
+                f.write("{} {}\n".format(sample_name, str(int(np.ceil(n_tokens)))))
+                f.flush()
+    else:
+        raise RuntimeError("Unsupported data_type: {}".format(data_type))

 def calc_shape(args, dataset, nj=64):
-    shape_path = os.path.join(args.data_dir, dataset, "speech_shape")
+    data_name = args.dataset_conf.get("data_names", "speech,text").split(",")[0]
+    shape_path = os.path.join(args.data_dir, dataset, "{}_shape".format(data_name))
     if os.path.exists(shape_path):
         logging.info('Shape file for small dataset already exists.')
         return
-    split_shape_path = os.path.join(args.data_dir, dataset, "shape_files")
+    split_shape_path = os.path.join(args.data_dir, dataset, "{}_shape_files".format(data_name))
     if os.path.exists(split_shape_path):
         shutil.rmtree(split_shape_path)
     os.mkdir(split_shape_path)

     # split
-    wav_scp_file = os.path.join(args.data_dir, dataset, "wav.scp")
-    with open(wav_scp_file) as f:
+    file_name = args.data_file_names.split(",")[0]
+    scp_file = os.path.join(args.data_dir, dataset, file_name)
+    with open(scp_file) as f:
         lines = f.readlines()
         num_lines = len(lines)
         num_job_lines = num_lines // nj
     start = 0
     for i in range(nj):
         end = start + num_job_lines
-        file = os.path.join(split_shape_path, "wav.scp.{}".format(str(i + 1)))
+        file = os.path.join(split_shape_path, "{}.{}".format(file_name, str(i + 1)))
         with open(file, "w") as f:
             if i == nj - 1:
                 f.writelines(lines[start:])
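The new kaldi_ark branch above reads each feature matrix with kaldiio and takes its shape directly. A small round-trip sketch of that pattern (the file names are illustrative):

    import numpy as np
    import kaldiio

    # Write one feature matrix to an ark/scp pair, then read it back the way
    # calc_shape_core's kaldi_ark branch does.
    feats = {"utt1": np.zeros((120, 80), dtype=np.float32)}  # 120 frames, 80 dims
    kaldiio.save_ark("embeds.1.ark", feats, scp="embeds.1.scp")

    with open("embeds.1.scp") as f:
        for line in f:
            sample_name, feature_path = line.strip().split()
            n_frames, feature_dim = kaldiio.load_mat(feature_path).shape
            print("{} {},{}".format(sample_name, n_frames, feature_dim))  # utt1 120,80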
@@ -108,15 +140,18 @@
     # combine
     with open(shape_path, "w") as f:
         for i in range(nj):
-            job_file = os.path.join(split_shape_path, "speech_shape.{}".format(str(i + 1)))
+            job_file = os.path.join(split_shape_path, "{}_shape.{}".format(data_name, str(i + 1)))
             with open(job_file) as job_f:
                 lines = job_f.readlines()
                 f.writelines(lines)
     logging.info('Generating shape files done.')

-def generate_data_list(data_dir, dataset, nj=64):
-    list_file = os.path.join(data_dir, dataset, "data.list")
+def generate_data_list(args, data_dir, dataset, nj=64):
+    data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
+    file_names = args.data_file_names.split(",")
+    concat_data_name = "_".join(data_names)
+    list_file = os.path.join(data_dir, dataset, "{}_data.list".format(concat_data_name))
     if os.path.exists(list_file):
         logging.info('Data list for large dataset already exists.')
         return
@@ -125,85 +160,67 @@
         shutil.rmtree(split_path)
     os.mkdir(split_path)

-    with open(os.path.join(data_dir, dataset, "wav.scp")) as f_wav:
-        wav_lines = f_wav.readlines()
-    with open(os.path.join(data_dir, dataset, "text")) as f_text:
-        text_lines = f_text.readlines()
-    num_lines = len(wav_lines)
+    data_lines_list = []
+    for file_name in file_names:
+        with open(os.path.join(data_dir, dataset, file_name)) as f:
+            lines = f.readlines()
+            data_lines_list.append(lines)
+    num_lines = len(data_lines_list[0])
     num_job_lines = num_lines // nj
     start = 0
     for i in range(nj):
         end = start + num_job_lines
         split_path_nj = os.path.join(split_path, str(i + 1))
         os.mkdir(split_path_nj)
-        wav_file = os.path.join(split_path_nj, "wav.scp")
-        text_file = os.path.join(split_path_nj, "text")
-        with open(wav_file, "w") as fw, open(text_file, "w") as ft:
-            if i == nj - 1:
-                fw.writelines(wav_lines[start:])
-                ft.writelines(text_lines[start:])
-            else:
-                fw.writelines(wav_lines[start:end])
-                ft.writelines(text_lines[start:end])
+        for file_id, file_name in enumerate(file_names):
+            file = os.path.join(split_path_nj, file_name)
+            with open(file, "w") as f:
+                if i == nj - 1:
+                    f.writelines(data_lines_list[file_id][start:])
+                else:
+                    f.writelines(data_lines_list[file_id][start:end])
         start = end

     with open(list_file, "w") as f_data:
         for i in range(nj):
-            wav_path = os.path.join(split_path, str(i + 1), "wav.scp")
-            text_path = os.path.join(split_path, str(i + 1), "text")
-            f_data.write(wav_path + " " + text_path + "\n")
+            path = ""
+            for file_name in file_names:
+                path = path + os.path.join(split_path, str(i + 1), file_name) + " "
+            f_data.write(path.strip() + "\n")

 def prepare_data(args, distributed_option):
     distributed = distributed_option.distributed
     if not distributed or distributed_option.dist_rank == 0:
-        filter_wav_text(args.data_dir, args.train_set)
-        filter_wav_text(args.data_dir, args.valid_set)
+        if hasattr(args, "filter_input") and args.filter_input:
+            filter_wav_text(args.data_dir, args.train_set)
+            filter_wav_text(args.data_dir, args.valid_set)
         if args.dataset_type == "small":
             calc_shape(args, args.train_set)
             calc_shape(args, args.valid_set)
         if args.dataset_type == "large":
-            generate_data_list(args.data_dir, args.train_set)
-            generate_data_list(args.data_dir, args.valid_set)
+            generate_data_list(args, args.data_dir, args.train_set)
+            generate_data_list(args, args.data_dir, args.valid_set)

+    data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
+    data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
+    file_names = args.data_file_names.split(",")
+    print("data_names: {}, data_types: {}, file_names: {}".format(data_names, data_types, file_names))
+    assert len(data_names) == len(data_types) == len(file_names)
     if args.dataset_type == "small":
-        args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "speech_shape")]
-        args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "speech_shape")]
-        data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
-        data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
-        args.train_data_path_and_name_and_type = [
-            ["{}/{}/wav.scp".format(args.data_dir, args.train_set), data_names[0], data_types[0]],
-            ["{}/{}/text".format(args.data_dir, args.train_set), data_names[1], data_types[1]]
-        ]
-        args.valid_data_path_and_name_and_type = [
-            ["{}/{}/wav.scp".format(args.data_dir, args.valid_set), data_names[0], data_types[0]],
-            ["{}/{}/text".format(args.data_dir, args.valid_set), data_names[1], data_types[1]]
-        ]
-        if args.embed_path is not None:
+        args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "{}_shape".format(data_names[0]))]
+        args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "{}_shape".format(data_names[0]))]
+        args.train_data_path_and_name_and_type, args.valid_data_path_and_name_and_type = [], []
+        for file_name, data_name, data_type in zip(file_names, data_names, data_types):
             args.train_data_path_and_name_and_type.append(
-                [os.path.join(args.embed_path, "embeds", args.train_set, "embeds.scp"), "embed", "kaldi_ark"])
+                ["{}/{}/{}".format(args.data_dir, args.train_set, file_name), data_name, data_type])
             args.valid_data_path_and_name_and_type.append(
-                [os.path.join(args.embed_path, "embeds", args.valid_set, "embeds.scp"), "embed", "kaldi_ark"])
+                ["{}/{}/{}".format(args.data_dir, args.valid_set, file_name), data_name, data_type])
     else:
-        args.train_data_file = os.path.join(args.data_dir, args.train_set, "data.list")
-        args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data.list")
-        if args.embed_path is not None:
-            if not distributed or distributed_option.dist_rank == 0:
-                for d in [args.train_set, args.valid_set]:
-                    file = os.path.join(args.data_dir, d, "data.list")
-                    with open(file) as f:
-                        lines = f.readlines()
-                    out_file = os.path.join(args.data_dir, d, "data_with_embed.list")
-                    with open(out_file, "w") as out_f:
-                        for line in lines:
-                            parts = line.strip().split()
-                            idx = parts[0].split("/")[-2]
-                            embed_file = os.path.join(args.embed_path, "embeds", args.valid_set, "ark",
-                                                      "embeds.{}.ark".format(idx))
-                            out_f.write(parts[0] + " " + parts[1] + " " + embed_file + "\n")
-            args.train_data_file = os.path.join(args.data_dir, args.train_set, "data_with_embed.list")
-            args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data_with_embed.list")
+        concat_data_name = "_".join(data_names)
+        args.train_data_file = os.path.join(args.data_dir, args.train_set, "{}_data.list".format(concat_data_name))
+        args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "{}_data.list".format(concat_data_name))

     if distributed:
         dist.barrier()
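Taken together, a paraformerbert-style recipe now declares three aligned streams and prepare_data derives every path from them. A sketch of the resulting settings, with types.SimpleNamespace standing in for the real argparse/yaml objects, the file names taken from the updated run.sh above, and the matching data_names/data_types assumed to be speech,text,embed / sound,text,kaldi_ark:

    from types import SimpleNamespace

    dataset_conf = {"data_names": "speech,text,embed", "data_types": "sound,text,kaldi_ark"}
    args = SimpleNamespace(data_dir="data", train_set="train",
                           data_file_names="wav.scp,text,embed.scp")

    data_names = dataset_conf.get("data_names", "speech,text").split(",")
    data_types = dataset_conf.get("data_types", "sound,text").split(",")
    file_names = args.data_file_names.split(",")

    # small-dataset mode: shape file named after the first stream, plus one
    # (path, name, type) triple per stream, mirroring prepare_data above
    train_shape_file = "{}/{}/{}_shape".format(args.data_dir, args.train_set, data_names[0])
    triples = [["{}/{}/{}".format(args.data_dir, args.train_set, f), n, t]
               for f, n, t in zip(file_names, data_names, data_types)]
    print(train_shape_file)  # data/train/speech_shape
    print(triples)
    # [['data/train/wav.scp', 'speech', 'sound'],
    #  ['data/train/text', 'text', 'text'],
    #  ['data/train/embed.scp', 'embed', 'kaldi_ark']]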