From d43f77408b8f3e169c59dfb6b6d82e45e6b91714 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 19:19:06 +0800
Subject: [PATCH] decoding
---
funasr/models/llm_asr/model.py | 122 +++++++++++++++++++++++++++++++---------
1 file changed, 93 insertions(+), 29 deletions(-)
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 78d9340..dd806cf 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -6,7 +6,8 @@
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast
-
+import re
+import time
from funasr.models.scama.utils import sequence_mask
from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
from funasr.models.ctc.ctc import CTC
@@ -19,6 +19,7 @@
from funasr.utils.datadir_writer import DatadirWriter
from funasr.register import tables
from funasr.train_utils.device_funcs import to_device
+import traceback
@tables.register("model_classes", "LLMASR")
@@ -489,6 +490,7 @@
fbank_fake_len = fbank_fake_lens[batch_idx].item()
fbank_beg_idx = fbank_beg[batch_idx, 0].item()
min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+
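+ # copy the encoder output into the fbank placeholder span reserved in inputs_embeds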
try:
inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
batch_idx, :min_len, :
@@ -496,10 +498,10 @@
except Exception as e:
logging.error(f"{str(e)}, {traceback.format_exc()}")
logging.info(
- f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}"
+ f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens[batch_idx].item()}"
)
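+ # on a shape mismatch, retry with the encoder's reported output length, clamped to the span computed above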
fbank_fake_len = encoder_out_lens[batch_idx].item()
- min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+ min_len = min(fbank_fake_len, min_len)
inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
batch_idx, :min_len, :
]
@@ -532,7 +534,7 @@
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
- def data_template(self, data_in):
+ def data_template(self, data):
system, user, assistant = [], [], []
for i, item in enumerate(data):
role = item["role"]
@@ -554,27 +556,37 @@
return contents
- def data_load_speech(self, contents: dict, tokenizer, frontend, **kwargs):
+ def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data=None, **kwargs):
+ meta_data = meta_data if meta_data is not None else {}
system = contents["system"]
user = contents["user"]
assistant = contents["assistant"]
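+ # split the rendered chat prompt around <|startofspeech|>...<|endofspeech|> spans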
pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
- input_ids, labels, fbank, fbank_lens, fbank_mask, fbank_beg = [], [], [], [], [], []
+ input_ids, labels, source_ids, target_ids, fbank, fbank_lens, fbank_mask, fbank_beg = (
+ [],
+ [],
+ [],
+ [],
+ [],
+ [],
+ [],
+ [],
+ )
for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
splits = pattern.split(source_input)
- source_ids = []
+ source_ids_i = []
fbank_mask_i = []
fbank_beg_i = []
fbank_lens_i = []
for k, sub_str in enumerate(splits):
if not sub_str.startswith("<|startofspeech|>"):
sub_token = tokenizer.encode(sub_str)
- source_ids += sub_token
+ source_ids_i += sub_token
fbank_mask_i += [0] * len(sub_token)
else:
sub_str = sub_str.replace("<|startofspeech|>", "").replace(
@@ -582,7 +594,10 @@
)
if sub_str.startswith("!"):
try:
+ time1 = time.perf_counter()
data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+ time2 = time.perf_counter()
+ meta_data["load_data"] = f"{time2 - time1:0.3f}"
except Exception as e:
logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
@@ -593,6 +608,15 @@
is_final=True,
) # speech: [b, T, d]
+ time3 = time.perf_counter()
+ meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+ meta_data["batch_data_time"] = (
+ speech_lengths.sum().item()
+ * frontend.frame_shift
+ * frontend.lfr_n
+ / 1000
+ )
+
if kwargs.get("permute", True):
speech = speech.permute(0, 2, 1)
@@ -600,14 +624,14 @@
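+ # placeholder length mirrors the encoder's downsampling: conv (kernel 3, stride 2, pad 1), then a further 2x reduction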
olens = 1 + (olens - 3 + 2 * 1) // 2
sub_token_len = (olens - 1) // 2 + 1
sub_token = [0] * sub_token_len
- fbank_beg_i = [len(source_ids)]
- source_ids += sub_token
+ fbank_beg_i = [len(source_ids_i)]
+ source_ids_i += sub_token
fbank_mask_i += [1] * len(sub_token)
- source_mask = [-100] * len(source_ids)
+ source_mask = [-100] * len(source_ids_i)
target_out = f"{target_out}<|im_end|>"
target_ids = tokenizer.encode(target_out)
- input_ids += source_ids + target_ids
+ input_ids += source_ids_i + target_ids
labels += source_mask + target_ids
fbank_mask += fbank_mask_i
fbank_beg.append(fbank_beg_i)
@@ -615,7 +639,7 @@
input_ids = torch.tensor(input_ids, dtype=torch.int64) # [: self.max_token_length]
attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
labels = torch.tensor(labels, dtype=torch.int64) # [: self.max_token_length]
- source_ids = torch.tensor(source_ids, dtype=torch.int64)
+ source_ids = torch.tensor(source_ids_i, dtype=torch.int64)
target_ids = torch.tensor(target_ids, dtype=torch.int64)
fbank = speech[0, :, :]
@@ -653,13 +677,18 @@
if kwargs.get("batch_size", 1) > 1:
raise NotImplementedError("batch decoding is not implemented")
- contents = self.data_template(data_in)
- output = self.data_load_speech(contents, tokenizer, frontend, **kwargs)
+ contents = self.data_template(data_in[0])
+ output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
batch = to_device(output, kwargs["device"])
# audio encoder
speech = batch["speech"]
speech_lengths = batch["speech_lengths"][:, 0]
+ # fp16/bf16: cast the input features to match the audio encoder's compute dtype
+ if kwargs.get("fp16", False):
+ speech = speech.to(torch.float16)
+ elif kwargs.get("bf16", False):
+ speech = speech.to(torch.bfloat16)
encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
# audio_adaptor
@@ -667,7 +696,7 @@
input_ids = batch["input_ids"]
source_ids = batch["source_ids"]
- if kwargs.get("tearchforing", False):
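+ # without teacher forcing, feed only the prompt (source) ids and let the LLM generate the answer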
+ if not kwargs.get("tearchforing", False):
input_ids = source_ids
input_ids[input_ids < 0] = 0
inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
@@ -682,19 +711,50 @@
batch_idx, :min_len, :
]
- if not kwargs.get("tearchforing", False):
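+ # resolve the LLM compute dtype: an explicit llm_dtype wins, otherwise fall back to the fp16/bf16 flags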
+ llm_dtype = kwargs.get("llm_dtype", "fp32")
+ if llm_dtype == "fp32":
+ llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
+ llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
- generated_ids = self.llm.generate(
- inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
- )
- generated_ids = [
- output_ids[len(input_id) :]
- for input_id, output_ids in zip(input_ids, generated_ids)
- ]
- response = tokenizer.batch_decode(
- generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
- )[0]
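+ # cast the LLM and the spliced embeddings to the target dtype and run the whole decode under autocast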
+ dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
+ with torch.cuda.amp.autocast(enabled=llm_dtype != "fp32", dtype=dtype_map[llm_dtype]):
label = contents["assistant"][0]
+ self.llm = self.llm.to(dtype_map[llm_dtype])
+ inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
+
+ if not kwargs.get("tearchforing", False):
+
+ generated_ids = self.llm.generate(
+ inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
+ )
+ # with inputs_embeds (and no input_ids), generate() returns only the newly generated tokens, so no prompt-length slicing is needed
+ response = tokenizer.batch_decode(
+ generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
+ )[0]
+
+ loss = None
+ else:
+
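+ # teacher forcing: score the reference labels with the LM loss instead of free-running generation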
+ labels_ids = batch["labels_ids"]
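+ # -100 is the ignore_index of the cross-entropy loss in HF causal-LM models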
+ labels_ids[labels_ids == -1] = -100
+ attention_mask = batch.get("attention_mask", None)
+ model_outputs = self.llm(
+ inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+ )
+
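+ # greedy decode: argmax over logits, keeping only positions after the prompt ids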
+ preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
+ response = tokenizer.batch_decode(
+ preds,
+ add_special_tokens=False,
+ skip_special_tokens=kwargs.get("skip_special_tokens", True),
+ )[0]
+ loss = model_outputs.loss.item()
ibest_writer = None
if kwargs.get("output_dir") is not None:
@@ -703,11 +763,15 @@
ibest_writer = self.writer[f"{0 + 1}best_recog"]
results = []
- result_i = {"key": key[0], "text": response, "label": label}
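+ # text_tn: strip punctuation/symbols, keeping word characters, whitespace, and CJK (U+3000, U+4E00-U+9FFF)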
+ response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
+ result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
+ if loss is not None:
+ result_i["loss"] = loss
results.append(result_i)
if ibest_writer is not None:
- ibest_writer["text"][key[0]] = text
+ ibest_writer["text"][key[0]] = response
ibest_writer["label"][key[0]] = label
+ ibest_writer["text_tn"][key[0]] = response_clean
return results, meta_data
--
Gitblit v1.9.1