From 82530ddf974a706df5a6a1e258d80c8dbc3f1d72 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 10 Jun 2024 09:19:16 +0800
Subject: [PATCH] llm_asr: fix fbank splice fallback, add timing meta_data, clean up inference decoding

* Import traceback, which the existing error logging already uses, and,
  when splicing encoder_out into inputs_embeds fails, also log the
  encoder_out shape and length before retrying with min_len clamped by
  both the true encoder length and the previously computed bound.
* Have data_load_speech record load_data / extract_feat timings and the
  batch audio duration in meta_data, and pass meta_data in from
  inference.
* Define label before the generate / teacher-forcing branch so both
  paths can report it, decode generated_ids as returned by generate
  (drop the prompt-stripping step), slice preds over the full target
  span, and return loss as a Python float.
* Write an additional punctuation-stripped "text_tn" field alongside
  text and label, both in the results and via ibest_writer.

---
 funasr/models/llm_asr/model.py |   41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

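For reference, a minimal standalone sketch of the fallback splice changed in
the first hunk, runnable on its own. The tensor sizes below are made up for
illustration; only the variable names (inputs_embeds, encoder_out,
encoder_out_lens, fbank_beg_idx, fbank_fake_len, min_len) come from model.py.

    import torch

    inputs_embeds = torch.zeros(1, 100, 8)    # [batch, prompt_len, dim]
    encoder_out = torch.randn(1, 25, 8)       # [batch, audio_len, dim]
    encoder_out_lens = torch.tensor([25])
    fbank_beg_idx = 70
    fbank_fake_len = 50                       # placeholder length over-estimates the audio

    min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)  # 30
    try:
        # fails here: the target slice holds 30 rows, the source only 25
        inputs_embeds[0, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[0, :min_len, :]
    except Exception:
        # patched fallback: clamp by the real encoder length and by the previous
        # bound, so the retried slice can only shrink
        fbank_fake_len = encoder_out_lens[0].item()
        min_len = min(fbank_fake_len, min_len)                             # 25
        inputs_embeds[0, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[0, :min_len, :]
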
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 697f78d..21072b0 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -19,6 +19,7 @@
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.register import tables
 from funasr.train_utils.device_funcs import to_device
+import traceback
 
 
 @tables.register("model_classes", "LLMASR")
@@ -489,6 +490,7 @@
             fbank_fake_len = fbank_fake_lens[batch_idx].item()
             fbank_beg_idx = fbank_beg[batch_idx, 0].item()
             min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+
             try:
                 inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
                     batch_idx, :min_len, :
@@ -496,10 +498,10 @@
             except Exception as e:
                 logging.error(f"{str(e)}, {traceback.format_exc()}")
                 logging.info(
-                    f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}"
+                    f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens[batch_idx].item()}"
                 )
                 fbank_fake_len = encoder_out_lens[batch_idx].item()
-                min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+                min_len = min(fbank_fake_len, min_len)
                 inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
                     batch_idx, :min_len, :
                 ]
@@ -554,7 +556,7 @@
 
         return contents
 
-    def data_load_speech(self, contents: dict, tokenizer, frontend, **kwargs):
+    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs):
 
         system = contents["system"]
         user = contents["user"]
@@ -592,7 +594,10 @@
                     )
                     if sub_str.startswith("!"):
                         try:
+                            time1 = time.perf_counter()
                             data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+                            time2 = time.perf_counter()
+                            meta_data["load_data"] = f"{time2 - time1:0.3f}"
                         except Exception as e:
                             logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
 
@@ -602,6 +607,15 @@
                             frontend=frontend,
                             is_final=True,
                         )  # speech: [b, T, d]
+
+                        time3 = time.perf_counter()
+                        meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+                        meta_data["batch_data_time"] = (
+                            speech_lengths.sum().item()
+                            * frontend.frame_shift
+                            * frontend.lfr_n
+                            / 1000
+                        )
 
                         if kwargs.get("permute", True):
                             speech = speech.permute(0, 2, 1)
@@ -664,7 +678,7 @@
             raise NotImplementedError("batch decoding is not implemented")
 
         contents = self.data_template(data_in[0])
-        output = self.data_load_speech(contents, tokenizer, frontend, **kwargs)
+        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
         batch = to_device(output, kwargs["device"])
 
         # audio encoder
@@ -692,19 +706,20 @@
                 batch_idx, :min_len, :
             ]
 
+        label = contents["assistant"][0]
         if not kwargs.get("tearchforing", False):
 
             generated_ids = self.llm.generate(
                 inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
             )
-            generated_ids = [
-                output_ids[len(input_id) :]
-                for input_id, output_ids in zip(input_ids, generated_ids)
-            ]
+            # generated_ids = [
+            #     output_ids[len(input_id) :]
+            #     for input_id, output_ids in zip(input_ids, generated_ids)
+            # ]
             response = tokenizer.batch_decode(
                 generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
             )[0]
-            label = contents["assistant"][0]
+
             loss = None
         else:
 
@@ -715,13 +730,13 @@
                 inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
             )
 
-            preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1]]
+            preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
             response = tokenizer.batch_decode(
                 preds,
                 add_special_tokens=False,
                 skip_special_tokens=kwargs.get("skip_special_tokens", True),
             )[0]
-            loss = model_outputs.loss
+            loss = model_outputs.loss.item()
 
         ibest_writer = None
         if kwargs.get("output_dir") is not None:
@@ -730,7 +745,8 @@
             ibest_writer = self.writer[f"{0 + 1}best_recog"]
 
         results = []
-        result_i = {"key": key[0], "text": response, "label": label}
+        response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
+        result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
         if loss is not None:
             result_i["loss"] = loss
         results.append(result_i)
@@ -738,5 +754,6 @@
         if ibest_writer is not None:
             ibest_writer["text"][key[0]] = response
             ibest_writer["label"][key[0]] = label
+            ibest_writer["text_tn"][key[0]] = response_clean
 
         return results, meta_data

--
Gitblit v1.9.1
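
A small usage note on the new "text_tn" field: the pattern is the one from the
hunk above, the sample string is made up, and the expected output assumes
standard CPython re semantics (\w matches CJK word characters in unicode mode).

    import re

    response = "你好, world! 2024."
    response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
    print(response_clean)   # expected: "你好 world 2024" (punctuation stripped; CJK, Latin, digits, whitespace kept)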