import logging
import time

import torch
import torch.nn as nn
from torch.cuda.amp import autocast

from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.register import tables


@tables.register("model_classes", "LCBNet")
class LCBNet(nn.Module):
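    # LCB-net (Long-Context Biasing Network): pairs the speech encoder with a
    # text encoder, a cross-modal fusion encoder, and a bias predictor so that
    # long-context keyword text (e.g. OCR of slides) can bias recognition.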

        # Component classes are resolved by name from the registry, then
        # instantiated from their config dicts.
        fusion_encoder_class = tables.encoder_classes.get(fusion_encoder)
        fusion_encoder = fusion_encoder_class(**fusion_encoder_conf)
        bias_predictor_class = tables.encoder_classes.get(bias_predictor)
        bias_predictor = bias_predictor_class(**bias_predictor_conf)

        if decoder is not None:
            decoder_class = tables.decoder_classes.get(decoder)
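            # Registry sketch (FunASR tables API): a decoder registered with
            # @tables.register("decoder_classes", "TransformerDecoder") is what
            # tables.decoder_classes.get(decoder) resolves here.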

            ind: int
        """
        with autocast(False):
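            # Disable autocast here: SpecAug and CMVN run in fp32 even when the
            # surrounding model runs under AMP.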
            # Data augmentation
            if self.specaug is not None and self.training:
                speech, speech_lengths = self.specaug(speech, speech_lengths)

            # Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                speech, speech_lengths = self.normalize(speech, speech_lengths)

        # Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim2)
        encoder_out, encoder_out_lens, _ = self.encoder(speech, speech_lengths)
        intermediate_outs = None
        if isinstance(encoder_out, tuple):
            # Encoders with intermediate branches (e.g. InterCTC) return a tuple.
            intermediate_outs = encoder_out[1]
            encoder_out = encoder_out[0]

        if intermediate_outs is not None:
            return (encoder_out, intermediate_outs), encoder_out_lens

        return encoder_out, encoder_out_lens

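    # Attention-decoder branch loss; in FunASR models this is typically
    # teacher-forced cross entropy with label smoothing.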
    def _calc_att_loss(

            logging.info("enable beam_search")
            self.init_beam_search(**kwargs)
            self.nbest = kwargs.get("nbest", 1)

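        # Front half of inference: load the raw inputs, extract fbank features,
        # and record per-stage timing in meta_data.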
        meta_data = {}
        if isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank":  # fbank

        else:
            # extract fbank feats
            time1 = time.perf_counter()
            sample_list = load_audio_text_image_video(
                data_in,
                fs=frontend.fs,
                audio_fs=kwargs.get("fs", 16000),
                data_type=kwargs.get("data_type", "sound"),
                tokenizer=tokenizer,
            )
            time2 = time.perf_counter()
            meta_data["load_data"] = f"{time2 - time1:0.3f}"
            # The loader returns paired modalities: audio samples and the
            # long-context (OCR/keyword) text used for biasing.
            audio_sample_list = sample_list[0]
            ocr_sample_list = sample_list[1]
            speech, speech_lengths = extract_fbank(
                audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
            )
            time3 = time.perf_counter()
            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
            # Total batch audio duration in seconds, assuming a 10 ms frame shift.
            frame_shift = 10
            meta_data["batch_data_time"] = speech_lengths.sum().item() * frame_shift / 1000

        speech = speech.to(device=kwargs["device"])
        speech_lengths = speech_lengths.to(device=kwargs["device"])

        # Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        if isinstance(encoder_out, tuple):
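            # encode() returns (encoder_out, intermediate_outs) when intermediate
            # branches are enabled; keep only the final encoder output.
            encoder_out = encoder_out[0]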