From 6c467e6f0abfc6d20d0621fbbf67b4dbd81776cc Mon Sep 17 00:00:00 2001
From: Shi Xian <40013335+R1ckShi@users.noreply.github.com>
Date: Tue, 18 Jun 2024 10:01:56 +0800
Subject: [PATCH] Merge pull request #1825 from modelscope/dev_libt

---
 funasr/models/llm_asr/model.py |  326 +++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 236 insertions(+), 90 deletions(-)

diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 411b59d..c209026 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -6,7 +6,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.cuda.amp import autocast
-
+import re
 from funasr.models.scama.utils import sequence_mask
 from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
 from funasr.models.ctc.ctc import CTC
@@ -18,6 +18,8 @@
 from funasr.utils import postprocess_utils
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.register import tables
+from funasr.train_utils.device_funcs import to_device
+import traceback
 
 
 @tables.register("model_classes", "LLMASR")
@@ -164,8 +166,6 @@
                 text: (Batch, Length)
                 text_lengths: (Batch,)
         """
-        # import pdb;
-        # pdb.set_trace()
         if len(text_lengths.size()) > 1:
             text_lengths = text_lengths[:, 0]
         if len(speech_lengths.size()) > 1:
@@ -468,7 +468,7 @@
         if len(speech_lengths.size()) > 1:
             speech_lengths = speech_lengths[:, 0]
 
-        batch_size = speech.shape[0]
+        batch_size, frames, _ = speech.shape
 
         # audio encoder
         encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
@@ -476,22 +476,35 @@
         # audio_adaptor
         encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
-        input_ids[input_ids == -1] = 0
-        input_ids[input_ids == -100] = 0
-        if hasattr(self.llm.model, "embed_tokens"):
-            inputs_embeds = self.llm.model.embed_tokens(input_ids)
-        elif hasattr(self.llm.model.model, "embed_tokens"):
-            inputs_embeds = self.llm.model.model.embed_tokens(input_ids)
-        else:
-            inputs_embeds = self.llm.model.model.model.embed_tokens(input_ids)
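+        # -1 / -100 padding ids are not valid embedding indices; clamp them to 0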
+        input_ids[input_ids < 0] = 0
+        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
 
         batch_size, token_num, dims = inputs_embeds.shape
-        _, l, _ = encoder_out.shape
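+        # fbank_mask flags the speech placeholder positions in input_ids; its row
+        # sums give the number of audio-embedding slots reserved per sample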
+        fbank_mask[fbank_mask < 0] = 0
+        fbank_fake_lens = fbank_mask.sum(-1).to(torch.int32)
         for batch_idx in range(batch_size):
+
+            fbank_fake_len = fbank_fake_lens[batch_idx].item()
             fbank_beg_idx = fbank_beg[batch_idx, 0].item()
-            inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + l, :] = encoder_out[
-                batch_idx, :l, :
-            ]
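+            # clip to the room left in inputs_embeds so the splice below cannot overflow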
+            min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+
+            try:
+                inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                    batch_idx, :min_len, :
+                ]
+            except Exception as e:
+                logging.error(f"{str(e)}, {traceback.format_exc()}")
+                logging.info(
+                    f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens[batch_idx].item()}"
+                )
+                fbank_fake_len = encoder_out_lens[batch_idx].item()
+                min_len = min(fbank_fake_len, min_len)
+                inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                    batch_idx, :min_len, :
+                ]
+
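+        # -100 is the ignore_index of the underlying cross-entropy loss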
+        labels_ids[labels_ids == -1] = -100
 
         model_outputs = self.llm(
             inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
@@ -505,12 +518,146 @@
             stats["acc"] = acc_att
 
         stats["loss"] = torch.clone(loss.detach())
+        stats["batch_size"] = batch_size
+        stats["batch_size_x_frames"] = frames * batch_size
+        stats["batch_size_real_frames"] = speech_lengths.sum().item()
+        stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+        stats["batch_size_x_tokens"] = token_num * batch_size
+        stats["batch_size_real_tokens"] = attention_mask.sum().item()
+        stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
 
         # force_gatherable: to-device and to-tensor if scalar for DataParallel
         if self.length_normalized_loss:
             batch_size = int((labels_ids > 0 + 1).sum())
         loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
         return loss, stats, weight
+
+    def data_template(self, data):
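+        """Split a chat-style message list into per-role lists of contents."""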
+        system, user, assistant = [], [], []
+        for i, item in enumerate(data):
+            role = item["role"]
+            content = item["content"]
+            if role == "system":
+                system.append(content)
+            elif role == "user":
+                user.append(content)
+            elif role == "assistant":
+                assistant.append(content)
+
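+        # repeat the system prompt so that each user turn has one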
+        system = system * len(user)
+
+        contents = {
+            "system": system,
+            "user": user,
+            "assistant": assistant,
+        }
+
+        return contents
+
+    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data=None, **kwargs):
+        # a mutable default dict would be shared across calls; create a fresh one instead
+        if meta_data is None:
+            meta_data = {}
+
+        system = contents["system"]
+        user = contents["user"]
+        assistant = contents["assistant"]
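+        # audio to be loaded is wrapped in <|startofspeech|>...<|endofspeech|> spans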
+        pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
+        input_ids, labels, source_ids, target_ids, fbank, fbank_lens, fbank_mask, fbank_beg = (
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+        )
+
+        for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
+
+            source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+            splits = pattern.split(source_input)
+            source_ids_i = []
+            fbank_mask_i = []
+            fbank_beg_i = []
+            fbank_lens_i = []
+            for k, sub_str in enumerate(splits):
+                if not sub_str.startswith("<|startofspeech|>"):
+                    sub_token = tokenizer.encode(sub_str)
+                    source_ids_i += sub_token
+                    fbank_mask_i += [0] * len(sub_token)
+                else:
+                    sub_str = sub_str.replace("<|startofspeech|>", "").replace(
+                        "<|endofspeech|>", ""
+                    )
+                    if sub_str.startswith("!"):
+                        try:
+                            time1 = time.perf_counter()
+                            data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+                            time2 = time.perf_counter()
+                            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+                        except Exception as e:
+                            logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
+                            raise
+
+                        speech, speech_lengths = extract_fbank(
+                            data_src,
+                            data_type=kwargs.get("data_type", "sound"),
+                            frontend=frontend,
+                            is_final=True,
+                        )  # speech: [b, T, d]
+
+                        time3 = time.perf_counter()
+                        meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+                        meta_data["batch_data_time"] = (
+                            speech_lengths.sum().item()
+                            * frontend.frame_shift
+                            * frontend.lfr_n
+                            / 1000
+                        )
+
+                        if kwargs.get("permute", True):
+                            speech = speech.permute(0, 2, 1)
+
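+                        # reserve one placeholder token per expected encoder output frame:
+                        # conv output length floor((T + 2*pad - kernel)/stride) + 1 with
+                        # kernel=3, stride=2, pad=1, applied twice, then one more stride-2 step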
+                        olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                        olens = 1 + (olens - 3 + 2 * 1) // 2
+                        sub_token_len = (olens - 1) // 2 + 1
+                        sub_token = [0] * sub_token_len
+                        fbank_beg_i = [len(source_ids_i)]
+                        source_ids_i += sub_token
+                        fbank_mask_i += [1] * len(sub_token)
+
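+            # only the assistant target is supervised; prompt tokens are masked out of the loss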
+            source_mask = [-100] * len(source_ids_i)
+            target_out = f"{target_out}<|im_end|>"
+            target_ids = tokenizer.encode(target_out)
+            input_ids += source_ids_i + target_ids
+            labels += source_mask + target_ids
+            fbank_mask += fbank_mask_i
+            fbank_beg.append(fbank_beg_i)
+
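+        # batch_size == 1 is assumed here; the tensors below come from the last loop iteration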
+        input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+        attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+        labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
+        source_ids = torch.tensor(source_ids_i, dtype=torch.int64)
+        target_ids = torch.tensor(target_ids, dtype=torch.int64)
+
+        fbank = speech[0, :, :]
+        fbank_lens = speech_lengths
+        fbank_mask = torch.tensor(fbank_mask, dtype=torch.float32)
+        fbank_beg = torch.tensor(fbank_beg, dtype=torch.int32)
+
+        output = {
+            "speech": fbank[None, :, :],
+            "speech_lengths": fbank_lens[:, None],
+            "fbank_mask": fbank_mask[None, :],
+            "fbank_beg": fbank_beg[None,],
+            "input_ids": input_ids[None, :],
+            "attention_mask": attention_mask[None, :],
+            "labels_ids": labels[None, :],
+            "source_ids": source_ids[None, :],
+            "target_ids": target_ids[None, :],
+        }
+
+        return output
 
     def inference(
         self,
@@ -522,92 +669,86 @@
         **kwargs,
     ):
 
-        prompt = kwargs.get("prompt", "Transcribe speech to text.")
+        meta_data = {}
+        prompt = kwargs.get("prompt", None)
 
         if kwargs.get("batch_size", 1) > 1:
             raise NotImplementedError("batch decoding is not implemented")
 
-        meta_data = {}
-        if (
-            isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
-        ):  # fbank
-            speech, speech_lengths = data_in, data_lengths
-            if len(speech.shape) < 3:
-                speech = speech[None, :, :]
-            if speech_lengths is None:
-                speech_lengths = speech.shape[1]
-        else:
-            # extract fbank feats
-            time1 = time.perf_counter()
-            audio_sample_list = load_audio_text_image_video(
-                data_in,
-                fs=frontend.fs,
-                audio_fs=kwargs.get("fs", 16000),
-                data_type=kwargs.get("data_type", "sound"),
-                tokenizer=tokenizer,
-            )
-            time2 = time.perf_counter()
-            meta_data["load_data"] = f"{time2 - time1:0.3f}"
-            speech, speech_lengths = extract_fbank(
-                audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
-            )
-            time3 = time.perf_counter()
-            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
-            meta_data["batch_data_time"] = (
-                speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
-            )
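+        # data_in carries chat-style messages; build tensors and move them to the target device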
+        contents = self.data_template(data_in[0])
+        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
+        batch = to_device(output, kwargs["device"])
 
-        speech = speech.to(device=kwargs["device"])
-        speech_lengths = speech_lengths.to(device=kwargs["device"])
+        # audio encoder
+        speech = batch["speech"]
+        speech_lengths = batch["speech_lengths"][:, 0]
+        # fp16 / bf16: cast the input features before the audio encoder
+        if kwargs.get("fp16", False):
+            speech = speech.to(torch.float16)
+        elif kwargs.get("bf16", False):
+            speech = speech.to(torch.bfloat16)
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
 
-        # Encoder
-        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
+        # audio_adaptor
+        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
-        # adaptor
-        encoder_out = self.audio_adaptor(encoder_out)
+        input_ids = batch["input_ids"]
+        source_ids = batch["source_ids"]
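+        # without teacher forcing, decode from the prompt (source) ids alone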
+        if not kwargs.get("tearchforing", False):
+            input_ids = source_ids
+        input_ids[input_ids < 0] = 0
+        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
 
-        prompt_pre = "USER: \nINSTRUCTION: {}\nINPUT: ".format(prompt)
-        prompt_ids = tokenizer.encode(prompt_pre)
-        prompt_length = len(prompt_ids)
-        prompt_ids = torch.tensor(prompt_ids, dtype=torch.int64).to(kwargs["device"])
+        batch_size, token_num, dims = inputs_embeds.shape
+        fbank_beg = batch["fbank_beg"]
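+        # splice the audio-adaptor outputs into the placeholder slots, mirroring forward()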
+        for batch_idx in range(batch_size):
 
-        if hasattr(self.llm.model, "embed_tokens"):
-            inputs_embeds = self.llm.model.embed_tokens(prompt_ids)
-        elif hasattr(self.llm.model.model, "embed_tokens"):
-            inputs_embeds = self.llm.model.model.embed_tokens(prompt_ids)
-        else:
-            inputs_embeds = self.llm.model.model.model.embed_tokens(prompt_ids)
+            min_len = encoder_out_lens[batch_idx].item()
+            fbank_beg_idx = fbank_beg[batch_idx, 0].item()
+            inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                batch_idx, :min_len, :
+            ]
 
-        inputs_embeds = torch.cat(
-            (inputs_embeds[None, :, :], encoder_out), dim=1
-        )  # [prompt, audio]
-        attention_mask = torch.ones(inputs_embeds.size()[:-1], dtype=torch.long).to(
-            kwargs["device"]
-        )
+        llm_dtype = kwargs.get("llm_dtype", "fp32")
+        dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
+        with torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]):
+            label = contents["assistant"][0]
 
-        preds = self.llm.generate(
-            inputs_embeds=inputs_embeds,
-            max_length=kwargs.get("max_length", 200),
-            max_new_tokens=kwargs.get("max_new_tokens", 200),
-            num_beams=kwargs.get("num_beams", 4),
-            do_sample=kwargs.get("do_sample", False),
-            min_length=kwargs.get("min_length", 1),
-            top_p=kwargs.get("top_p", 1.0),
-            repetition_penalty=kwargs.get("repetition_penalty", 1.0),
-            length_penalty=kwargs.get("length_penalty", 1.0),
-            temperature=kwargs.get("temperature", 1.0),
-            attention_mask=attention_mask,
-            bos_token_id=tokenizer.bos_token_id,
-            eos_token_id=tokenizer.eos_token_id,
-            pad_token_id=tokenizer.pad_token_id,
-        )
+            if not kwargs.get("tearchforing", False):
 
-        text = tokenizer.batch_decode(preds, add_special_tokens=False, skip_special_tokens=True)
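+                # free-running decoding; the "max_length" kwarg serves as the new-token budget here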
+                generated_ids = self.llm.generate(
+                    inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
+                )
+                response = tokenizer.batch_decode(
+                    generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
+                )[0]
 
-        text = text[0].split(": ")[-1]
-        text = text.strip()
+                loss = None
+            else:
 
-        # preds = torch.argmax(model_outputs.logits, -1)
+                labels_ids = batch["labels_ids"]
+                labels_ids[labels_ids == -1] = -100
+                attention_mask = batch.get("attention_mask", None)
+                model_outputs = self.llm(
+                    inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+                )
+
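+                # teacher forcing: argmax predictions, keeping only the target span after the prompt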
+                preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
+                response = tokenizer.batch_decode(
+                    preds,
+                    add_special_tokens=False,
+                    skip_special_tokens=kwargs.get("skip_special_tokens", True),
+                )[0]
+                loss = model_outputs.loss.item()
 
         ibest_writer = None
         if kwargs.get("output_dir") is not None:
@@ -616,10 +757,15 @@
             ibest_writer = self.writer[f"{0 + 1}best_recog"]
 
         results = []
-        result_i = {"key": key[0], "text": text}
+        response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
+        result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
+        if loss is not None:
+            result_i["loss"] = loss
         results.append(result_i)
 
         if ibest_writer is not None:
-            ibest_writer["text"][key[0]] = text
+            ibest_writer["text"][key[0]] = response
+            ibest_writer["label"][key[0]] = label
+            ibest_writer["text_tn"][key[0]] = response_clean
 
         return results, meta_data

--
Gitblit v1.9.1