From fe588bc508c0076bb007d6ed36c18ac8ecb341ac Mon Sep 17 00:00:00 2001
From: 王梦迪 <73778524+di-osc@users.noreply.github.com>
Date: Tue, 20 May 2025 16:10:59 +0800
Subject: [PATCH] Fsmn_vad supports multi-threaded concurrent calls (#2519)
---
funasr/models/llm_asr_nar/model.py | 386 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 238 insertions(+), 148 deletions(-)
diff --git a/funasr/models/llm_asr_nar/model.py b/funasr/models/llm_asr_nar/model.py
index 30537cf..192c199 100644
--- a/funasr/models/llm_asr_nar/model.py
+++ b/funasr/models/llm_asr_nar/model.py
@@ -12,6 +12,7 @@
from funasr.models.ctc.ctc import CTC
from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
from funasr.metrics.compute_acc import th_accuracy, compute_accuracy
+
# from funasr.models.e2e_asr_common import ErrorCalculator
from funasr.train_utils.device_funcs import force_gatherable
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
@@ -24,7 +25,7 @@
@tables.register("model_classes", "LLMASRNAR")
class LLMASRNAR(nn.Module):
""" """
-
+
def __init__(
self,
specaug: str = None,
@@ -60,28 +61,32 @@
# postencoder: Optional[AbsPostEncoder] = None,
**kwargs,
):
-
+
super().__init__()
-
+
if specaug is not None:
specaug_class = tables.specaug_classes.get(specaug)
specaug = specaug_class(**specaug_conf)
if normalize is not None:
normalize_class = tables.normalize_classes.get(normalize)
normalize = normalize_class(**normalize_conf)
-
+
# audio encoder
hub = encoder_conf.get("hub", None)
if hub == "funasr":
from funasr import AutoModel
- init_param_path = encoder_conf.get("init_param_path", "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+ init_param_path = encoder_conf.get(
+ "init_param_path",
+ "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+ )
model = AutoModel(model=init_param_path, model_revision="master")
# frontend = model.kwargs.get("frontend")
model.model.decoder = None
-
+
self.audio_encoder = model.model
# self.frontend = frontend
-
+
elif hub == "hf":
pass
else:
@@ -108,14 +113,13 @@
param.requires_grad = False
model.eval()
self.llm = model
-
+
# adaptor
adaptor_class = tables.adaptor_classes.get(adaptor)
adaptor = adaptor_class(**adaptor_conf)
-
+
self.adaptor = adaptor
-
-
+
self.blank_id = blank_id
self.sos = sos if sos is not None else vocab_size - 1
self.eos = eos if eos is not None else vocab_size - 1
@@ -124,7 +128,6 @@
self.specaug = specaug
self.normalize = normalize
self.encoder = encoder
-
self.criterion_att = LabelSmoothingLoss(
size=vocab_size,
@@ -142,7 +145,7 @@
self.length_normalized_loss = length_normalized_loss
self.beam_search = None
-
+
def forward(
self,
speech: torch.Tensor,
@@ -150,7 +153,7 @@
text: torch.Tensor,
text_lengths: torch.Tensor,
input_ids: torch.Tensor,
- attention_mask:torch.Tensor,
+ attention_mask: torch.Tensor,
labels_ids: torch.Tensor,
label_mask: torch.Tensor,
audio_mask: torch.Tensor,
@@ -163,18 +166,16 @@
text: (Batch, Length)
text_lengths: (Batch,)
"""
- # import pdb;
- # pdb.set_trace()
if len(text_lengths.size()) > 1:
text_lengths = text_lengths[:, 0]
if len(speech_lengths.size()) > 1:
speech_lengths = speech_lengths[:, 0]
-
+
batch_size = speech.shape[0]
-
+
# audio encoder
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, audio_mask=audio_mask)
-
+
# adaptor
encoder_out = self.adaptor(encoder_out)
@@ -191,13 +192,16 @@
if audio_mask is not None:
batch_size, token_num, dims = inputs_embeds.shape
_, l, _ = encoder_out.shape
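+            # Splice the audio embeddings into the LLM input sequence: pad encoder_out to the
+            # token grid, keep it at positions flagged by audio_mask and the text embeddings
+            # elsewhere, then shift the whole sequence left by one to line up with the labels.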
- encoder_outs_pad = F.pad(encoder_out, (0, 0, token_num-l-1, 1, 0, 0), value=0.0)
- inputs_embeds = encoder_outs_pad * audio_mask[:, :, None] + inputs_embeds * (1.0-audio_mask[:, :, None])
+ encoder_outs_pad = F.pad(encoder_out, (0, 0, token_num - l - 1, 1, 0, 0), value=0.0)
+ inputs_embeds = encoder_outs_pad * audio_mask[:, :, None] + inputs_embeds * (
+ 1.0 - audio_mask[:, :, None]
+ )
inputs_embeds = F.pad(inputs_embeds[:, 1:, :], (0, 0, 0, 1, 0, 0), value=0.0)
- model_outputs = self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids)
+ model_outputs = self.llm(
+ inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+ )
loss = model_outputs.loss
-
stats = {}
with torch.no_grad():
@@ -212,11 +216,14 @@
batch_size = int((text_lengths + 1).sum())
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
-
+
def encode(
- self, speech: torch.Tensor, speech_lengths: torch.Tensor, **kwargs,
+ self,
+ speech: torch.Tensor,
+ speech_lengths: torch.Tensor,
+ **kwargs,
):
-
+
audio_mask = kwargs.get("audio_mask", None)
audio_token_lengths = audio_mask.sum(-1) if audio_mask is not None else None
text_token_int = kwargs.get("text_token_int", None)
@@ -227,32 +234,33 @@
enc, enc_lens = self.audio_encoder.encode(**batch)
with autocast(False):
enc_mask = sequence_mask(enc_lens, enc.size(1), device=enc.device)[:, None, :]
- pre_acoustic_embeds, pre_token_length, _, _ = self.audio_encoder.predictor(enc,
- mask=enc_mask,
- target_label_length=audio_token_lengths,
- )
+ pre_acoustic_embeds, pre_token_length, _, _ = self.audio_encoder.predictor(
+ enc,
+ mask=enc_mask,
+ target_label_length=audio_token_lengths,
+ )
return pre_acoustic_embeds, pre_token_length
+ def inference(
+ self,
+ data_in,
+ data_lengths=None,
+ key: list = None,
+ tokenizer=None,
+ frontend=None,
+ **kwargs,
+ ):
- def inference(self,
- data_in,
- data_lengths=None,
- key: list = None,
- tokenizer=None,
- frontend=None,
- **kwargs,
- ):
-
prompt = kwargs.get("prompt", "Transcribe speech to text.")
-
+
if kwargs.get("batch_size", 1) > 1:
raise NotImplementedError("batch decoding is not implemented")
-
-
meta_data = {}
- if isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank": # fbank
+ if (
+ isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
+ ): # fbank
speech, speech_lengths = data_in, data_lengths
if len(speech.shape) < 3:
speech = speech[None, :, :]
@@ -261,9 +269,13 @@
else:
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000),
- data_type=kwargs.get("data_type", "sound"),
- tokenizer=None)
+ audio_sample_list = load_audio_text_image_video(
+ data_in,
+ fs=frontend.fs,
+ audio_fs=kwargs.get("fs", 16000),
+ data_type=kwargs.get("data_type", "sound"),
+ tokenizer=None,
+ )
if len(kwargs.get("data_type", [])) > 1:
audio_sample_list, text_token_int_list = audio_sample_list
text_token_int = text_token_int_list[0].replace(" ", "")
@@ -272,27 +284,30 @@
text_token_int = None
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
- speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
- frontend=frontend)
+ speech, speech_lengths = extract_fbank(
+ audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
+ )
time3 = time.perf_counter()
meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
- meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
-
+ meta_data["batch_data_time"] = (
+ speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
+ )
+
speech = speech.to(device=kwargs["device"])
speech_lengths = speech_lengths.to(device=kwargs["device"])
-
+
# Encoder
- encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, text_token_int=text_token_int)
+ encoder_out, encoder_out_lens = self.encode(
+ speech, speech_lengths, text_token_int=text_token_int
+ )
# adaptor
encoder_out = self.adaptor(encoder_out)
-
-
+
prompt_pre = "USER: \nINSTRUCTION: {}\nINPUT: ".format(prompt)
prompt_ids = tokenizer.encode(prompt_pre)
prompt_length = len(prompt_ids)
prompt_ids = torch.tensor(prompt_ids, dtype=torch.int64).to(kwargs["device"])
-
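+        # The token-embedding table sits at different nesting depths depending on how the
+        # causal LM is wrapped, hence the hasattr chain below.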
if hasattr(self.llm.model, "embed_tokens"):
inputs_embeds = self.llm.model.embed_tokens(prompt_ids)
@@ -301,9 +316,13 @@
else:
inputs_embeds = self.llm.model.model.model.embed_tokens(prompt_ids)
- inputs_embeds = torch.cat((inputs_embeds[None, :, :], encoder_out), dim=1) # [prompt, audio]
- attention_mask = torch.ones(inputs_embeds.size()[:-1], dtype=torch.long).to(kwargs["device"])
-
+ inputs_embeds = torch.cat(
+ (inputs_embeds[None, :, :], encoder_out), dim=1
+ ) # [prompt, audio]
+ attention_mask = torch.ones(inputs_embeds.size()[:-1], dtype=torch.long).to(
+ kwargs["device"]
+ )
+
# model_outputs = self.llm.generate(
# inputs_embeds=inputs_embeds,
# max_length=kwargs.get("max_length", 200),
@@ -321,16 +340,17 @@
# pad_token_id=tokenizer.pad_token_id
# )
-
- model_outputs = self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=None)
+ model_outputs = self.llm(
+ inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=None
+ )
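+        # Non-autoregressive decoding: one forward pass, then an argmax over the logits at
+        # every position (generate() above is kept only as a commented-out alternative).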
preds = torch.argmax(model_outputs.logits, -1)
text = tokenizer.batch_decode(preds, add_special_tokens=False, skip_special_tokens=True)
- text = text[0].split(': ')[-1]
+ text = text[0].split(": ")[-1]
text = text.strip()
-
+
# preds = torch.argmax(model_outputs.logits, -1)
-
+
ibest_writer = None
if kwargs.get("output_dir") is not None:
if not hasattr(self, "writer"):
@@ -343,17 +363,14 @@
if ibest_writer is not None:
ibest_writer["text"][key[0]] = text
-
-
-
-
+
return results, meta_data
@tables.register("model_classes", "LLMASRNARPrompt")
class LLMASRNARPrompt(nn.Module):
""" """
-
+
def __init__(
self,
specaug: str = None,
@@ -366,7 +383,7 @@
decoder_conf: dict = None,
ctc: str = None,
ctc_conf: dict = None,
- ctc_weight: float = 0.5,
+ ctc_weight: float = 0.0,
llm: str = None,
llm_conf: dict = None,
adaptor: str = None,
@@ -390,43 +407,46 @@
# postencoder: Optional[AbsPostEncoder] = None,
**kwargs,
):
-
+
super().__init__()
-
+
if specaug is not None:
specaug_class = tables.specaug_classes.get(specaug)
specaug = specaug_class(**specaug_conf)
if normalize is not None:
normalize_class = tables.normalize_classes.get(normalize)
normalize = normalize_class(**normalize_conf)
-
+
# audio encoder
hub = encoder_conf.get("hub", None)
if hub == "funasr":
from funasr import AutoModel
- init_param_path = encoder_conf.get("init_param_path",
- "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+ init_param_path = encoder_conf.get(
+ "init_param_path",
+ "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+ )
model = AutoModel(model=init_param_path, model_revision="master")
# frontend = model.kwargs.get("frontend")
model.model.decoder = None
-
+
self.audio_encoder = model.model
# self.frontend = frontend
self.predictor_weight = predictor_weight
-
+
elif hub == "hf":
pass
else:
encoder_class = tables.encoder_classes.get(encoder)
encoder = encoder_class(input_size=input_size, **encoder_conf)
encoder_output_size = encoder.output_size()
-
+
# llm
hub = llm_conf.get("hub", "hf")
self.llm = None
if hub == "hf":
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
-
+
init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
model = AutoModelForCausalLM.from_pretrained(
init_param_path,
@@ -440,13 +460,13 @@
param.requires_grad = False
model.eval()
self.llm = model
-
+
# adaptor
adaptor_class = tables.adaptor_classes.get(adaptor)
adaptor = adaptor_class(**adaptor_conf)
-
+
self.adaptor = adaptor
-
+
self.blank_id = blank_id
self.sos = sos if sos is not None else vocab_size - 1
self.eos = eos if eos is not None else vocab_size - 1
@@ -455,7 +475,7 @@
self.specaug = specaug
self.normalize = normalize
self.encoder = encoder
-
+
self.criterion_att = LabelSmoothingLoss(
size=vocab_size,
padding_idx=ignore_id,
@@ -470,10 +490,17 @@
# )
#
self.error_calculator = None
-
+
self.length_normalized_loss = length_normalized_loss
self.beam_search = None
-
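+        # Optional auxiliary CTC head over the raw audio-encoder outputs; this assumes
+        # adaptor_conf["encoder_dim"] equals the audio encoder's output dimension.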
+        self.ctc_weight = ctc_weight
+        if ctc_weight > 0.0:
+            if ctc_conf is None:
+                ctc_conf = {}
+
+            ctc = CTC(odim=vocab_size, encoder_output_size=adaptor_conf["encoder_dim"], **ctc_conf)
+            self.ctc = ctc
+
def forward(
self,
speech: torch.Tensor,
@@ -500,15 +527,26 @@
text_lengths = text_lengths[:, 0]
if len(speech_lengths.size()) > 1:
speech_lengths = speech_lengths[:, 0]
-
+
batch_size = speech.shape[0]
-
+
+ stats = {}
# audio encoder
- encoder_out, encoder_out_lens, loss_pre = self.encode(speech, speech_lengths, audio_mask=audio_mask)
-
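+            # encode() now returns frame-level encoder outputs (enc, enc_lens) for the CTC
+            # branch plus the token-level predictor embeddings used by the adaptor/LLM.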
+ outs = self.encode(speech, speech_lengths, audio_mask=audio_mask)
+ enc, enc_lens = outs[0], outs[1]
+ encoder_out, encoder_out_lens, loss_pre = outs[2], outs[3], outs[4]
+
+ # decoder: CTC branch
+
+ if self.ctc_weight != 0.0:
+ loss_ctc, cer_ctc = self._calc_ctc_loss(enc, enc_lens, text, text_lengths)
+
+ # Collect CTC branch stats
+ stats["loss_ctc"] = torch.clone(loss_ctc.detach()) if loss_ctc is not None else None
+
# adaptor
encoder_out = self.adaptor(encoder_out)
-
+
if input_ids is not None:
input_ids[input_ids == -1] = 0
input_ids[input_ids == -100] = 0
@@ -518,7 +556,7 @@
inputs_embeds = self.llm.model.model.embed_tokens(input_ids)
else:
inputs_embeds = self.llm.model.model.model.embed_tokens(input_ids)
-
+
if audio_mask is not None:
            # inputs_embeds: [bos, prompt, input, pad, target]
prompt_bos_length = kwargs.get("prompt_bos_length", None)
@@ -526,74 +564,112 @@
prompt_bos_length = prompt_bos_length[0].item()
batch_size, token_num, dims = inputs_embeds.shape
_, l, _ = encoder_out.shape
- encoder_outs_pad = F.pad(encoder_out, (0, 0, prompt_bos_length, token_num - prompt_bos_length - l, 0, 0), value=0.0)
- inputs_embeds = encoder_outs_pad * audio_mask[:, :, None] + inputs_embeds * (1.0 - audio_mask[:, :, None])
- inputs_embeds = F.pad(inputs_embeds[:, 1:, :], (0, 0, 0, 1, 0, 0), value=0.0) # [prompt, input, pad, target, 0.0]
-
+ encoder_outs_pad = F.pad(
+ encoder_out,
+ (0, 0, prompt_bos_length, token_num - prompt_bos_length - l, 0, 0),
+ value=0.0,
+ )
+ inputs_embeds = encoder_outs_pad * audio_mask[:, :, None] + inputs_embeds * (
+ 1.0 - audio_mask[:, :, None]
+ )
+ inputs_embeds = F.pad(
+ inputs_embeds[:, 1:, :], (0, 0, 0, 1, 0, 0), value=0.0
+ ) # [prompt, input, pad, target, 0.0]
+
# labels_ids: [bos, prompt, input, target, eos] -> [-1, -1, input, target, eos]
# loss:
# inputs_embeds[:-1] -> [prompt, input, pad, target]
# labels_ids[1:] -> [prompt, input, target, eos] -> [-1, input, target, eos];
- model_outputs = self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids)
+ model_outputs = self.llm(
+ inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+ )
loss_llm = model_outputs.loss
+ stats["loss_llm"] = torch.clone(loss_llm.detach())
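+            # When the CTC branch is enabled, fold its weighted loss into the LLM loss
+            # before adding the predictor loss below.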
+ if self.ctc_weight > 0.0:
+ loss_llm = self.ctc_weight * loss_ctc + loss_llm
loss = loss_llm + loss_pre * self.predictor_weight
- stats = {}
+
with torch.no_grad():
preds = torch.argmax(model_outputs.logits, -1)
acc_att = compute_accuracy(preds[:, :-1], labels_ids[:, 1:], ignore_label=-100)
stats["acc"] = acc_att
-
-
+
stats["loss_pre"] = torch.clone(loss_pre.detach())
- stats["loss_llm"] = torch.clone(loss_llm.detach())
stats["loss"] = torch.clone(loss.detach())
-
+ stats["batch_size"] = batch_size
+
# force_gatherable: to-device and to-tensor if scalar for DataParallel
if self.length_normalized_loss:
batch_size = int((text_lengths + 1).sum())
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
-
+
def encode(
- self, speech: torch.Tensor, speech_lengths: torch.Tensor, **kwargs,
+ self,
+ speech: torch.Tensor,
+ speech_lengths: torch.Tensor,
+ **kwargs,
):
-
+
audio_mask = kwargs.get("audio_mask", None)
audio_token_lengths = audio_mask.sum(-1) if audio_mask is not None else None
text_token_int = kwargs.get("text_token_int", None)
if audio_token_lengths is None and text_token_int is not None:
audio_token_lengths = torch.tensor([len(text_token_int)], dtype=torch.int64)
-
+
batch = {"speech": speech, "speech_lengths": speech_lengths}
enc, enc_lens = self.audio_encoder.encode(**batch)
with autocast(False):
enc_mask = sequence_mask(enc_lens, enc.size(1), device=enc.device)[:, None, :]
- pre_acoustic_embeds, pre_token_length, _, _ = self.audio_encoder.predictor(enc,
- mask=enc_mask,
- target_label_length=audio_token_lengths,
- )
+ pre_acoustic_embeds, pre_token_length, _, _ = self.audio_encoder.predictor(
+ enc,
+ mask=enc_mask,
+ target_label_length=audio_token_lengths,
+ )
loss_pre = 0.0
if audio_token_lengths is not None:
- loss_pre = self.criterion_pre(audio_token_lengths.type_as(pre_token_length), pre_token_length)
-
- return pre_acoustic_embeds, pre_token_length, loss_pre
-
- def inference(self,
- data_in,
- data_lengths=None,
- key: list = None,
- tokenizer=None,
- frontend=None,
- **kwargs,
- ):
-
+ loss_pre = self.criterion_pre(
+ audio_token_lengths.type_as(pre_token_length), pre_token_length
+ )
+
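+        # Also return the frame-level outputs (enc, enc_lens) so forward() can compute
+        # the auxiliary CTC loss on them.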
+ return enc, enc_lens, pre_acoustic_embeds, pre_token_length, loss_pre
+
+ def _calc_ctc_loss(
+ self,
+ encoder_out: torch.Tensor,
+ encoder_out_lens: torch.Tensor,
+ ys_pad: torch.Tensor,
+ ys_pad_lens: torch.Tensor,
+ ):
+ # Calc CTC loss
+ loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
+
+ # Calc CER using CTC
+ cer_ctc = None
+ if not self.training and self.error_calculator is not None:
+ ys_hat = self.ctc.argmax(encoder_out).data
+ cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
+ return loss_ctc, cer_ctc
+
+ def inference(
+ self,
+ data_in,
+ data_lengths=None,
+ key: list = None,
+ tokenizer=None,
+ frontend=None,
+ **kwargs,
+ ):
+
prompt = kwargs.get("prompt", "Transcribe speech to text.")
-
+
if kwargs.get("batch_size", 1) > 1:
raise NotImplementedError("batch decoding is not implemented")
-
+
meta_data = {}
- if isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank": # fbank
+ if (
+ isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
+ ): # fbank
speech, speech_lengths = data_in, data_lengths
if len(speech.shape) < 3:
speech = speech[None, :, :]
@@ -602,9 +678,13 @@
else:
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000),
- data_type=kwargs.get("data_type", "sound"),
- tokenizer=None)
+ audio_sample_list = load_audio_text_image_video(
+ data_in,
+ fs=frontend.fs,
+ audio_fs=kwargs.get("fs", 16000),
+ data_type=kwargs.get("data_type", "sound"),
+ tokenizer=None,
+ )
if len(kwargs.get("data_type", [])) > 1:
audio_sample_list, text_token_int_list = audio_sample_list
text_token_int = text_token_int_list[0]
@@ -615,22 +695,25 @@
text_token_int = None
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
- speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
- frontend=frontend)
+ speech, speech_lengths = extract_fbank(
+ audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
+ )
time3 = time.perf_counter()
meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
- meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
-
+ meta_data["batch_data_time"] = (
+ speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
+ )
+
speech = speech.to(device=kwargs["device"])
speech_lengths = speech_lengths.to(device=kwargs["device"])
-
+
# Encoder
res = self.encode(speech, speech_lengths, text_token_int=text_token_int)
-        encoder_out = res[0]
+        encoder_out = res[2]  # pre_acoustic_embeds; encode() now also returns (enc, enc_lens) first
-
+
# adaptor
encoder_out = self.adaptor(encoder_out)
-
+
prompt_pre = "USER: \nINSTRUCTION: {}\nINPUT: ".format(prompt)
prompt_ids = tokenizer.encode(prompt_pre)
if prompt_ids[0] == tokenizer.bos_token_id:
@@ -639,7 +722,7 @@
prompt_length = len(prompt_ids)
prompt_ids = torch.tensor(prompt_ids, dtype=torch.int64).to(kwargs["device"])
pad = torch.tensor([tokenizer.pad_token_id], dtype=torch.int64).to(kwargs["device"])
-
+
if hasattr(self.llm.model, "embed_tokens"):
inputs_embeds = self.llm.model.embed_tokens(prompt_ids)
pad = self.llm.model.embed_tokens(pad)
@@ -647,10 +730,15 @@
inputs_embeds = self.llm.model.model.embed_tokens(prompt_ids)
else:
inputs_embeds = self.llm.model.model.model.embed_tokens(prompt_ids)
-
- inputs_embeds = torch.cat((inputs_embeds[None, :, :], encoder_out, pad[None, :, :]), dim=1) # [prompt, audio]
- attention_mask = torch.ones(inputs_embeds.size()[:-1], dtype=torch.long).to(kwargs["device"])
-
+
+ # inputs_embeds = torch.cat((inputs_embeds[None, :, :], encoder_out, pad[None, :, :]), dim=1) # [prompt, audio, pad]
+ inputs_embeds = torch.cat(
+ (inputs_embeds[None, :, :], encoder_out), dim=1
+ ) # [prompt, audio]
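+            # NOTE: unlike the commented-out variant above, no trailing pad embedding is
+            # appended; only the prompt and audio embeddings are fed to the LLM.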
+ attention_mask = torch.ones(inputs_embeds.size()[:-1], dtype=torch.long).to(
+ kwargs["device"]
+ )
+
# model_outputs = self.llm.generate(
# inputs_embeds=inputs_embeds,
# max_length=kwargs.get("max_length", 200),
@@ -667,30 +755,32 @@
# eos_token_id=tokenizer.eos_token_id,
# pad_token_id=tokenizer.pad_token_id
# )
-
- model_outputs = self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=None)
+
+ model_outputs = self.llm(
+ inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=None
+ )
preds = torch.argmax(model_outputs.logits, -1)
text = tokenizer.batch_decode(preds, add_special_tokens=False, skip_special_tokens=True)
-
- text = text[0].split(':')[-1]
+
+ text = text[0].split(":")[-1]
text = text.strip()
if text.startswith("Please\n "):
text = text.replace("Please\n ", "")
text = text.strip()
-
+
# preds = torch.argmax(model_outputs.logits, -1)
-
+
ibest_writer = None
if kwargs.get("output_dir") is not None:
if not hasattr(self, "writer"):
self.writer = DatadirWriter(kwargs.get("output_dir"))
ibest_writer = self.writer[f"{0 + 1}best_recog"]
-
+
results = []
result_i = {"key": key[0], "text": text}
results.append(result_i)
-
+
if ibest_writer is not None:
ibest_writer["text"][key[0]] = text
-
+
return results, meta_data
--
Gitblit v1.9.1