From 59bc02b089f7a626fe67907dcfc695eae6883f82 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 14 Jun 2024 13:59:49 +0800
Subject: [PATCH] decoding

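Rework LLM-ASR training and decoding:

- audio encoder: optionally freeze only the first N layers via
  audio_encoder_conf freeze_layer_num (the final LayerNorm stays trainable);
- LLM: always load through transformers AutoModelForCausalLM, cast the
  weights to llm_conf llm_dtype (bf16/fp16/fp32), and derive the adaptor
  output width from the LLM embedding table;
- adaptor: support warm-starting from a checkpoint via init_param_path;
- forward: keep encoder/adaptor in fp32 and run the LLM under
  torch.cuda.amp.autocast in the configured dtype;
- inference: fix the inverted teacher-forcing condition, record data-loading
  and feature-extraction timings in meta_data, and emit a
  punctuation-stripped text_tn field alongside text.

A minimal decoding sketch (the model path below is illustrative, not part
of this patch; extra kwargs such as llm_dtype are read with kwargs.get()
on the inference path):

    from funasr import AutoModel

    # "exp/llm_asr" is a hypothetical experiment dir whose config
    # instantiates this model class.
    model = AutoModel(model="exp/llm_asr")
    res = model.generate(
        input="example.wav",
        llm_dtype="bf16",  # or "fp16"/"fp32"; see dtype_map in model.py
        max_length=512,
    )
    print(res[0]["text"], res[0]["text_tn"])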
---
 funasr/models/llm_asr/model.py |  232 +++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 175 insertions(+), 57 deletions(-)

diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 78d9340..84d7d33 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -6,7 +6,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.cuda.amp import autocast
-
+import re
+import time
 from funasr.models.scama.utils import sequence_mask
 from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
 from funasr.models.ctc.ctc import CTC
@@ -19,6 +19,9 @@
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.register import tables
 from funasr.train_utils.device_funcs import to_device
+import traceback
+
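+# Map config dtype strings to torch dtypes (LLM weights and autocast).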
+dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
 
 
 @tables.register("model_classes", "LLMASR")
@@ -406,38 +409,60 @@
             audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
             audio_encoder_output_size = audio_encoder.output_size()
         freeze = audio_encoder_conf.get("freeze", True)
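+        # freeze_layer_num > 0 freezes only the first N encoder layers; otherwise
+        # freeze=True freezes the whole encoder.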
+        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
+
         if freeze:
             for name, param in audio_encoder.named_parameters():
-                param.requires_grad = False
+                if freeze_layer_num > 0:
+                    # Per-layer parameter names contain ".<layer_id>."; freeze
+                    # only parameters of the first freeze_layer_num layers.
+                    match = re.search(r"\.(\d+)\.", name)
+                    if match is not None:
+                        layer_id = int(match.group(1))
+                        if layer_id < freeze_layer_num:
+                            param.requires_grad = False
+                    elif "ln_post." not in name:
+                        # Also freeze non-layer parameters, except the final LayerNorm.
+                        param.requires_grad = False
+                else:
+                    param.requires_grad = False
+
             audio_encoder.eval()
 
         self.audio_encoder = audio_encoder
 
         # llm
-        hub = llm_conf.get("hub", "hf")
         self.llm = None
-        if hub == "hf":
-            from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
 
-            init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
+        from transformers import AutoModelForCausalLM
 
-            model = AutoModelForCausalLM.from_pretrained(
-                init_param_path,
-                load_in_8bit=None,
-                device_map=None,
-                use_cache=None,
-            )
-            freeze = llm_conf.get("freeze", True)
-            if freeze:
-                for name, param in model.named_parameters():
-                    param.requires_grad = False
-                model.eval()
-            self.llm = model
+        init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
+
+        model = AutoModelForCausalLM.from_pretrained(
+            init_param_path,
+            load_in_8bit=None,
+            device_map=None,
+            use_cache=None,
+        )
+        freeze = llm_conf.get("freeze", True)
+        if freeze:
+            for name, param in model.named_parameters():
+                param.requires_grad = False
+            model.eval()
+        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
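+        # LLM token-embedding width; the adaptor projects encoder features to this size.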
+        llm_dim = model.get_input_embeddings().weight.shape[-1]
 
         # adaptor
         adaptor_class = tables.adaptor_classes.get(audio_adaptor)
         audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
+        audio_adaptor_conf["llm_dim"] = llm_dim
         audio_adaptor = adaptor_class(**audio_adaptor_conf)
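+        # Optionally warm-start the adaptor from a standalone checkpoint.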
+        init_param_path = audio_adaptor_conf.get("init_param_path", None)
+        if init_param_path is not None:
+            src_state = torch.load(init_param_path, map_location="cpu")
+            flag = audio_adaptor.load_state_dict(src_state, strict=False)
+            logging.info(f"Loading audio_adaptor ckpt: {init_param_path}, status: {flag}")
 
         self.audio_adaptor = audio_adaptor
 
@@ -471,11 +496,12 @@
 
         batch_size, frames, _ = speech.shape
 
-        # audio encoder
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
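+        # Keep the encoder and adaptor in fp32; only the LLM runs in reduced precision below.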
+        with torch.cuda.amp.autocast(enabled=False):
+            # audio encoder
+            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
-        # audio_adaptor
-        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+            # audio_adaptor
+            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
         input_ids[input_ids < 0] = 0
         inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
@@ -489,6 +515,7 @@
             fbank_fake_len = fbank_fake_lens[batch_idx].item()
             fbank_beg_idx = fbank_beg[batch_idx, 0].item()
             min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
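+            # Overwrite the audio placeholder positions in inputs_embeds with adaptor outputs.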
+
             try:
                 inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
                     batch_idx, :min_len, :
@@ -496,20 +523,25 @@
             except Exception as e:
                 logging.error(f"{str(e)}, {traceback.format_exc()}")
                 logging.info(
-                    f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}"
+                    f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens[batch_idx].item()}"
                 )
                 fbank_fake_len = encoder_out_lens[batch_idx].item()
-                min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+                min_len = min(fbank_fake_len, min_len)
                 inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
                     batch_idx, :min_len, :
                 ]
 
-        labels_ids[labels_ids == -1] = -100
-
-        model_outputs = self.llm(
-            inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
-        )
-        loss = model_outputs.loss
+        with torch.cuda.amp.autocast(
+            enabled=self.llm_dtype != "fp32", dtype=dtype_map[self.llm_dtype]
+        ):
+            labels_ids[labels_ids == -1] = -100
+            attention_mask[attention_mask < 0] = 0
+            model_outputs = self.llm(
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
+            )
+            loss = model_outputs.loss
 
         stats = {}
         with torch.no_grad():
@@ -532,7 +564,13 @@
         loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
         return loss, stats, weight
 
-    def data_template(self, data_in):
+    def encode(self, speech, speech_lengths):
+        # Audio encoder expects (b, d, T); incoming features are (b, T, d), hence the permute.
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        return encoder_out, encoder_out_lens
+
+    def data_template(self, data):
         system, user, assistant = [], [], []
         for i, item in enumerate(data):
             role = item["role"]
@@ -554,27 +592,37 @@
 
         return contents
 
-    def data_load_speech(self, contents: dict, tokenizer, frontend, **kwargs):
+    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data=None, **kwargs):
 
+        # Avoid a shared mutable default; timings recorded here stay visible to the caller.
+        if meta_data is None:
+            meta_data = {}
         system = contents["system"]
         user = contents["user"]
         assistant = contents["assistant"]
         pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
-        input_ids, labels, fbank, fbank_lens, fbank_mask, fbank_beg = [], [], [], [], [], []
+        input_ids, labels, source_ids, target_ids = [], [], [], []
+        fbank, fbank_lens, fbank_mask, fbank_beg = [], [], [], []
 
         for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
 
             source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
 
             splits = pattern.split(source_input)
-            source_ids = []
+            source_ids_i = []
             fbank_mask_i = []
             fbank_beg_i = []
             fbank_lens_i = []
             for k, sub_str in enumerate(splits):
                 if not sub_str.startswith("<|startofspeech|>"):
                     sub_token = tokenizer.encode(sub_str)
-                    source_ids += sub_token
+                    source_ids_i += sub_token
                     fbank_mask_i += [0] * len(sub_token)
                 else:
                     sub_str = sub_str.replace("<|startofspeech|>", "").replace(
@@ -582,7 +630,10 @@
                     )
                     if sub_str.startswith("!"):
                         try:
+                            time1 = time.perf_counter()
                             data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+                            time2 = time.perf_counter()
+                            meta_data["load_data"] = f"{time2 - time1:0.3f}"
                         except Exception as e:
                             logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
 
@@ -593,6 +644,15 @@
                             is_final=True,
                         )  # speech: [b, T, d]
 
+                        time3 = time.perf_counter()
+                        meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
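+                        # Total audio duration in seconds: frames x frame_shift (ms) x LFR factor / 1000.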
+                        meta_data["batch_data_time"] = (
+                            speech_lengths.sum().item()
+                            * frontend.frame_shift
+                            * frontend.lfr_n
+                            / 1000
+                        )
+
                         if kwargs.get("permute", True):
                             speech = speech.permute(0, 2, 1)
 
@@ -600,14 +660,14 @@
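+                        # Placeholder token count mirrors the encoder/adaptor downsampling (two stride-2 stages).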
                         olens = 1 + (olens - 3 + 2 * 1) // 2
                         sub_token_len = (olens - 1) // 2 + 1
                         sub_token = [0] * sub_token_len
-                        fbank_beg_i = [len(source_ids)]
-                        source_ids += sub_token
+                        fbank_beg_i = [len(source_ids_i)]
+                        source_ids_i += sub_token
                         fbank_mask_i += [1] * len(sub_token)
 
-            source_mask = [-100] * len(source_ids)
+            source_mask = [-100] * len(source_ids_i)
             target_out = f"{target_out}<|im_end|>"
             target_ids = tokenizer.encode(target_out)
-            input_ids += source_ids + target_ids
+            input_ids += source_ids_i + target_ids
             labels += source_mask + target_ids
             fbank_mask += fbank_mask_i
             fbank_beg.append(fbank_beg_i)
@@ -615,7 +675,7 @@
         input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
         attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
         labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
-        source_ids = torch.tensor(source_ids, dtype=torch.int64)
+        source_ids = torch.tensor(source_ids_i, dtype=torch.int64)
         target_ids = torch.tensor(target_ids, dtype=torch.int64)
 
         fbank = speech[0, :, :]
@@ -653,21 +713,27 @@
         if kwargs.get("batch_size", 1) > 1:
             raise NotImplementedError("batch decoding is not implemented")
 
-        contents = self.data_template(data_in)
-        output = self.data_load_speech(contents, tokenizer, frontend, **kwargs)
+        contents = self.data_template(data_in[0])
+        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
         batch = to_device(output, kwargs["device"])
 
         # audio encoder
         speech = batch["speech"]
         speech_lengths = batch["speech_lengths"][:, 0]
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        # Optionally cast input features to half precision before the encoder.
+        if kwargs.get("fp16", False):
+            speech = speech.to(torch.float16)
+        elif kwargs.get("bf16", False):
+            speech = speech.to(torch.bfloat16)
+        # audio encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
         # audio_adaptor
         encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
         input_ids = batch["input_ids"]
         source_ids = batch["source_ids"]
-        if kwargs.get("tearchforing", False):
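+        # Without teacher forcing, feed only the prompt ids; the LLM generates the answer.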
+        if not kwargs.get("tearchforing", False):
             input_ids = source_ids
         input_ids[input_ids < 0] = 0
         inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
@@ -682,19 +748,49 @@
                 batch_idx, :min_len, :
             ]
 
-        if not kwargs.get("tearchforing", False):
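+        # Choose the LLM compute dtype: explicit llm_dtype wins, otherwise honor fp16/bf16 flags.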
+        llm_dtype = kwargs.get("llm_dtype", "fp32")
+        if llm_dtype == "fp32":
+            llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
+            llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
 
-            generated_ids = self.llm.generate(
-                inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
-            )
-            generated_ids = [
-                output_ids[len(input_id) :]
-                for input_id, output_ids in zip(input_ids, generated_ids)
-            ]
-            response = tokenizer.batch_decode(
-                generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
-            )[0]
+        with torch.cuda.amp.autocast(
+            enabled=llm_dtype != "fp32", dtype=dtype_map[llm_dtype]
+        ):
             label = contents["assistant"][0]
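+            # Cast LLM weights and the spliced embeddings to the chosen dtype once per call.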
+            self.llm = self.llm.to(dtype_map[llm_dtype])
+            inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
+
+            if not kwargs.get("tearchforing", False):
+
+                generated_ids = self.llm.generate(
+                    inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
+                )
+                # generate() is called with inputs_embeds only, so it returns
+                # just the newly generated ids; no prompt stripping is needed.
+                response = tokenizer.batch_decode(
+                    generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
+                )[0]
+
+                loss = None
+            else:
+
+                labels_ids = batch["labels_ids"]
+                labels_ids[labels_ids == -1] = -100
+                attention_mask = batch.get("attention_mask", None)
+                model_outputs = self.llm(
+                    inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+                )
+
+                preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
+                # add_special_tokens is an encode-time kwarg; batch_decode only
+                # honors skip_special_tokens.
+                response = tokenizer.batch_decode(
+                    preds, skip_special_tokens=kwargs.get("skip_special_tokens", True)
+                )[0]
+                loss = model_outputs.loss.item()
 
         ibest_writer = None
         if kwargs.get("output_dir") is not None:
@@ -703,11 +799,33 @@
             ibest_writer = self.writer[f"{0 + 1}best_recog"]
 
         results = []
-        result_i = {"key": key[0], "text": response, "label": label}
+        # Normalized copy: strip everything but word chars, whitespace, and CJK.
+        response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
+        result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
+        if loss is not None:
+            result_i["loss"] = loss
         results.append(result_i)
 
         if ibest_writer is not None:
-            ibest_writer["text"][key[0]] = text
+            ibest_writer["text"][key[0]] = response
             ibest_writer["label"][key[0]] = label
+            ibest_writer["text_tn"][key[0]] = response_clean
 
         return results, meta_data
+
+
+@tables.register("model_classes", "LLMASR3")
+class LLMASR3(nn.Module):
+    """Work-in-progress LLM-ASR variant whose audio encoder consumes
+    (b, T, d) features directly, without the permute used above."""
+
+    def __init__(
+        self,
+        *args,
+        **kwargs,
+    ):
+        # nn.Module.__init__ takes no extra arguments; config kwargs are
+        # ignored until this class is fleshed out.
+        super().__init__()
+
+    def encode(self, speech, speech_lengths):
+        # Audio encoder on (b, T, d) input; no permute, unlike encode() above.
+        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
+        return encoder_out, encoder_out_lens

--
Gitblit v1.9.1