游雁
2023-11-16 4ace5a95b052d338947fc88809a440ccd55cf6b4
funasr/utils/wav_utils.py
@@ -11,6 +11,7 @@
 import numpy as np
 import torch
 import torchaudio
+import soundfile
 import torchaudio.compliance.kaldi as kaldi
@@ -162,7 +163,13 @@
         waveform = torch.from_numpy(waveform.reshape(1, -1))
     else:
         # load pcm from wav, and resample
-        waveform, audio_sr = torchaudio.load(wav_file)
+        try:
+            waveform, audio_sr = torchaudio.load(wav_file)
+        except Exception:
+            waveform, audio_sr = soundfile.read(wav_file, dtype='float32')
+            if waveform.ndim == 2:
+                waveform = waveform[:, 0]
+            waveform = torch.tensor(np.expand_dims(waveform, axis=0))
         waveform = waveform * (1 << 15)
         waveform = torch_resample(waveform, audio_sr, model_sr)
@@ -181,7 +188,11 @@
 def wav2num_frame(wav_path, frontend_conf):
-    waveform, sampling_rate = torchaudio.load(wav_path)
+    try:
+        waveform, sampling_rate = torchaudio.load(wav_path)
+    except Exception:
+        waveform, sampling_rate = soundfile.read(wav_path)
+        waveform = torch.tensor(np.expand_dims(waveform, axis=0))
     speech_length = (waveform.shape[1] / sampling_rate) * 1000.
     n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])
     feature_dim = frontend_conf["n_mels"] * frontend_conf["lfr_m"]
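Both hunks above apply the same pattern: try torchaudio.load first and fall back to soundfile when the installed torchaudio backend cannot decode the file. A minimal standalone sketch of that fallback, assuming only that torchaudio and soundfile are importable (the helper name load_waveform is illustrative and not part of funasr):

    import numpy as np
    import torch
    import torchaudio
    import soundfile

    def load_waveform(wav_file):
        # Illustrative helper mirroring the fallback in the hunks above.
        try:
            # torchaudio returns a (channels, samples) float tensor and the sample rate.
            waveform, audio_sr = torchaudio.load(wav_file)
        except Exception:
            # soundfile returns a numpy array of shape (samples,) or (samples, channels).
            waveform, audio_sr = soundfile.read(wav_file, dtype="float32")
            if waveform.ndim == 2:
                waveform = waveform[:, 0]  # keep only the first channel
            waveform = torch.tensor(np.expand_dims(waveform, axis=0))  # match the (1, samples) layout
        return waveform, audio_sr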
@@ -298,18 +309,24 @@
     os.rename(text_file, "{}.bak".format(text_file))
     wav_dict = {}
     for line in wav_lines:
-        sample_name, wav_path = line.strip().split()
+        parts = line.strip().split()
+        if len(parts) != 2:
+            continue
+        sample_name, wav_path = parts
         wav_dict[sample_name] = wav_path
     text_dict = {}
     for line in text_lines:
-        sample_name, txt = line.strip().split(" ", 1)
-        text_dict[sample_name] = txt
+        parts = line.strip().split()
+        if len(parts) < 2:
+            continue
+        sample_name = parts[0]
+        text_dict[sample_name] = " ".join(parts[1:]).lower()
     filter_count = 0
-    with open(wav_file) as f_wav, open(text_file) as f_text:
+    with open(wav_file, "w") as f_wav, open(text_file, "w") as f_text:
         for sample_name, wav_path in wav_dict.items():
             if sample_name in text_dict.keys():
                 f_wav.write(sample_name + " " + wav_path + "\n")
                 f_text.write(sample_name + " " + text_dict[sample_name] + "\n")
             else:
                 filter_count += 1
     print("{}/{} samples in {} are filtered because of the mismatch between wav.scp and text".format(filter_count, len(wav_lines), dataset))
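For clarity, the new wav.scp / text alignment can be restated as a pure function over the raw lines. This is a hedged sketch, not the code in the repository; the name filter_matched and the utterance ids in the usage note are made up:

    def filter_matched(wav_lines, text_lines):
        # Build name -> path from wav.scp, skipping malformed entries.
        wav_dict = {}
        for line in wav_lines:
            parts = line.strip().split()
            if len(parts) != 2:
                continue
            wav_dict[parts[0]] = parts[1]
        # Build name -> lower-cased transcript from text, skipping lines without one.
        text_dict = {}
        for line in text_lines:
            parts = line.strip().split()
            if len(parts) < 2:
                continue
            text_dict[parts[0]] = " ".join(parts[1:]).lower()
        # Keep only samples present in both files.
        return [(name, path, text_dict[name]) for name, path in wav_dict.items() if name in text_dict]

    # Usage: filter_matched(["utt1 /path/utt1.wav", "utt2 /path/utt2.wav"], ["utt1 HELLO WORLD"])
    # returns [("utt1", "/path/utt1.wav", "hello world")]; utt2 is dropped as a mismatch.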