shixian.shi
2024-01-10 e30a17cf4e715b3d139fa1e0ba01cda1bcf0f884
funasr/models/fsmn_vad/model.py
@@ -8,8 +8,8 @@
 import math
 from typing import Optional
 import time
-from funasr.utils.register import register_class, registry_tables
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio,extract_fbank
+from funasr.register import tables
+from funasr.utils.load_utils import load_audio_text_image_video,extract_fbank
 from funasr.utils.datadir_writer import DatadirWriter
 from torch.nn.utils.rnn import pad_sequence
@@ -218,7 +218,7 @@
         return int(self.frame_size_ms)
-@register_class("model_classes", "FsmnVAD")
+@tables.register("model_classes", "FsmnVAD")
 class FsmnVAD(nn.Module):
     """
     Author: Speech Lab of DAMO Academy, Alibaba Group
@@ -238,7 +238,7 @@
                                                self.vad_opts.speech_to_sil_time_thres,
                                                self.vad_opts.frame_in_ms)
 
-        encoder_class = registry_tables.encoder_classes.get(encoder.lower())
+        encoder_class = tables.encoder_classes.get(encoder.lower())
         encoder = encoder_class(**encoder_conf)
         self.encoder = encoder
         # init variables
@@ -544,7 +544,7 @@
         else:
             # extract fbank feats
             time1 = time.perf_counter()
-            audio_sample_list = load_audio(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+            audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
             time2 = time.perf_counter()
             meta_data["load_data"] = f"{time2 - time1:0.3f}"
             speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
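Note: this commit migrates the FSMN VAD model from the old funasr.utils.register API (register_class, registry_tables) and the old load_audio loader to the consolidated funasr.register.tables registry and funasr.utils.load_utils.load_audio_text_image_video. A minimal sketch of how a class registered this way would be resolved at build time, mirroring the encoder lookup in the diff and assuming the registry exposes a model_classes table analogous to encoder_classes; the call site and "model_conf" kwargs dict are illustrative assumptions, not part of the commit:

    from funasr.register import tables

    # Resolve the class registered above via @tables.register("model_classes", "FsmnVAD"),
    # then instantiate it; "model_conf" is a hypothetical kwargs dict.
    model_class = tables.model_classes.get("FsmnVAD")
    model = model_class(**model_conf)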