From 31e2eb39ad3965931f9df22fce86c708f4d9da95 Mon Sep 17 00:00:00 2001
From: 语帆 <yf352572@alibaba-inc.com>
Date: Wed, 28 Feb 2024 16:14:57 +0800
Subject: [PATCH] lcbnet: remove pdb debug traces and encode OCR text during inference

---
 funasr/models/lcbnet/model.py |   17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/funasr/models/lcbnet/model.py b/funasr/models/lcbnet/model.py
index f45e71d..f4caee8 100644
--- a/funasr/models/lcbnet/model.py
+++ b/funasr/models/lcbnet/model.py
@@ -274,15 +274,12 @@
                 ind: int
         """
         with autocast(False):
-            pdb.set_trace()
             # Data augmentation
             if self.specaug is not None and self.training:
                 speech, speech_lengths = self.specaug(speech, speech_lengths)
-            pdb.set_trace()
             # Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
             if self.normalize is not None:
                 speech, speech_lengths = self.normalize(speech, speech_lengths)
-        pdb.set_trace()
         # Forward encoder
         # feats: (Batch, Length, Dim)
         # -> encoder_out: (Batch, Length2, Dim2)
@@ -299,7 +296,6 @@
         
         if intermediate_outs is not None:
             return (encoder_out, intermediate_outs), encoder_out_lens
-        pdb.set_trace()
         return encoder_out, encoder_out_lens
     
     def _calc_att_loss(
@@ -426,7 +422,6 @@
         else:
             # extract fbank feats
             time1 = time.perf_counter()
-            pdb.set_trace()
             sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000),
                                                             data_type=kwargs.get("data_type", "sound"),
                                                             tokenizer=tokenizer)
@@ -434,22 +429,26 @@
             meta_data["load_data"] = f"{time2 - time1:0.3f}"
             audio_sample_list = sample_list[0]
             ocr_sample_list = sample_list[1]
-            pdb.set_trace()
             speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
                                                    frontend=frontend)
-            pdb.set_trace()
             time3 = time.perf_counter()
             meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
-            meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
+            frame_shift = 10  # fbank frame shift in ms
+            meta_data["batch_data_time"] = speech_lengths.sum().item() * frame_shift / 1000  # batch audio duration in seconds
 
         speech = speech.to(device=kwargs["device"])
         speech_lengths = speech_lengths.to(device=kwargs["device"])
-        pdb.set_trace()
         # Encoder
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
         if isinstance(encoder_out, tuple):
             encoder_out = encoder_out[0]
         
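+        # Shift non-zero OCR token ids up by one; id 0 is assumed to be reserved for padding/blank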
+        ocr_list_new = [[x + 1 if x != 0 else x for x in sublist] for sublist in ocr_sample_list]
+        ocr = torch.tensor(ocr_list_new)
+        ocr_lengths = ocr.new_full([1], dtype=torch.long, fill_value=ocr.size(1))  # length tensor for a single-utterance batch
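+        # Encode the OCR token sequence with the text encoder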
+        ocr, ocr_lens, _ = self.text_encoder(ocr, ocr_lengths)
         # c. Pass the encoder result to the beam search
         nbest_hyps = self.beam_search(
             x=encoder_out[0], maxlenratio=kwargs.get("maxlenratio", 0.0), minlenratio=kwargs.get("minlenratio", 0.0)

--
Gitblit v1.9.1