From 9b4e9cc8a0311e5243d69b73ed073e7ea441982e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 27 Mar 2024 16:05:29 +0800
Subject: [PATCH] contextual_paraformer: clean up training code (remove pdb traces, unused import), add export()
---
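Notes:

The training path of ContextualParaformer still carried pdb.set_trace()
calls and an unused LabelSmoothingLoss import; this patch removes them
and simplifies the length handling at the top of forward().

On the length handling: a bare Tensor.squeeze() drops *every* size-1
dimension, so a batch of one would silently lose its batch axis. A
minimal sketch of the pitfall (the shapes below are illustrative
assumptions, not taken from a real batch):

    import torch

    lengths_2d = torch.tensor([[7], [5], [9]])  # (3, 1): padded loader output
    lengths_1d = torch.tensor([7])              # (1,): already flat, batch of one

    print(lengths_2d.squeeze().shape)   # torch.Size([3])  -- intended
    print(lengths_1d.squeeze().shape)   # torch.Size([])   -- batch dim lost

    # Dimension-guarded form, equivalent to the old `lengths[:, 0]` branch:
    safe = lengths_1d.squeeze(-1) if lengths_1d.dim() > 1 else lengths_1d
    print(safe.shape)                   # torch.Size([1])

The guarded form is what the hunk in forward() below uses.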
funasr/models/contextual_paraformer/model.py | 61 ++++++++++++------------------
1 file changed, 25 insertions(+), 36 deletions(-)
diff --git a/funasr/models/contextual_paraformer/model.py b/funasr/models/contextual_paraformer/model.py
index 10bbf9d..b9fd3c4 100644
--- a/funasr/models/contextual_paraformer/model.py
+++ b/funasr/models/contextual_paraformer/model.py
@@ -17,9 +17,6 @@
from distutils.version import LooseVersion
from funasr.register import tables
-from funasr.losses.label_smoothing_loss import (
- LabelSmoothingLoss, # noqa: H301
-)
from funasr.utils import postprocess_utils
from funasr.metrics.compute_acc import th_accuracy
from funasr.models.paraformer.model import Paraformer
@@ -29,7 +26,7 @@
from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
-import pdb
+
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
@@ -81,7 +78,6 @@
self.attn_loss = torch.nn.L1Loss()
self.crit_attn_smooth = crit_attn_smooth
-
def forward(
self,
speech: torch.Tensor,
@@ -98,21 +94,18 @@
text: (Batch, Length)
text_lengths: (Batch,)
"""
- if len(text_lengths.size()) > 1:
- text_lengths = text_lengths[:, 0]
- if len(speech_lengths.size()) > 1:
- speech_lengths = speech_lengths[:, 0]
- pdb.set_trace()
+ text_lengths = text_lengths.squeeze(-1) if text_lengths.dim() > 1 else text_lengths
+ speech_lengths = speech_lengths.squeeze(-1) if speech_lengths.dim() > 1 else speech_lengths
+
batch_size = speech.shape[0]
hotword_pad = kwargs.get("hotword_pad")
hotword_lengths = kwargs.get("hotword_lengths")
- dha_pad = kwargs.get("dha_pad")
- pdb.set_trace()
+ # dha_pad = kwargs.get("dha_pad")
+
# 1. Encoder
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
- pdb.set_trace()
loss_ctc, cer_ctc = None, None
stats = dict()
@@ -127,12 +120,11 @@
stats["loss_ctc"] = loss_ctc.detach() if loss_ctc is not None else None
stats["cer_ctc"] = cer_ctc
- pdb.set_trace()
# 2b. Attention decoder branch
loss_att, acc_att, cer_att, wer_att, loss_pre, loss_ideal = self._calc_att_clas_loss(
encoder_out, encoder_out_lens, text, text_lengths, hotword_pad, hotword_lengths
)
- pdb.set_trace()
+
# 3. CTC-Att loss definition
if self.ctc_weight == 0.0:
loss = loss_att + loss_pre * self.predictor_weight
@@ -158,7 +150,6 @@
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight
-
def _calc_att_clas_loss(
self,
encoder_out: torch.Tensor,
@@ -170,38 +161,33 @@
):
encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
encoder_out.device)
- pdb.set_trace()
+
if self.predictor_bias == 1:
_, ys_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
ys_pad_lens = ys_pad_lens + self.predictor_bias
- pdb.set_trace()
+
pre_acoustic_embeds, pre_token_length, _, _ = self.predictor(encoder_out, ys_pad, encoder_out_mask,
ignore_id=self.ignore_id)
- pdb.set_trace()
# -1. bias encoder
if self.use_decoder_embedding:
hw_embed = self.decoder.embed(hotword_pad)
else:
hw_embed = self.bias_embed(hotword_pad)
- pdb.set_trace()
+
hw_embed, (_, _) = self.bias_encoder(hw_embed)
- pdb.set_trace()
_ind = np.arange(0, hotword_pad.shape[0]).tolist()
selected = hw_embed[_ind, [i - 1 for i in hotword_lengths.detach().cpu().tolist()]]
contextual_info = selected.squeeze(0).repeat(ys_pad.shape[0], 1, 1).to(ys_pad.device)
- pdb.set_trace()
+
# 0. sampler
decoder_out_1st = None
if self.sampling_ratio > 0.0:
- if self.step_cur < 2:
- logging.info("enable sampler in paraformer, sampling_ratio: {}".format(self.sampling_ratio))
+
sematic_embeds, decoder_out_1st = self.sampler(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens,
pre_acoustic_embeds, contextual_info)
else:
- if self.step_cur < 2:
- logging.info("disable sampler in paraformer, sampling_ratio: {}".format(self.sampling_ratio))
sematic_embeds = pre_acoustic_embeds
- pdb.set_trace()
+
# 1. Forward decoder
decoder_outs = self.decoder(
encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, contextual_info=contextual_info
@@ -217,7 +203,7 @@
loss_ideal = None
'''
loss_ideal = None
- pdb.set_trace()
+
if decoder_out_1st is None:
decoder_out_1st = decoder_out
# 2. Compute attention loss
@@ -237,7 +223,6 @@
cer_att, wer_att = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
return loss_att, acc_att, cer_att, wer_att, loss_pre, loss_ideal
-
def sampler(self, encoder_out, encoder_out_lens, ys_pad, ys_pad_lens, pre_acoustic_embeds, contextual_info):
tgt_mask = (~make_pad_mask(ys_pad_lens, maxlen=ys_pad_lens.max())[:, :, None]).to(ys_pad.device)
@@ -271,7 +256,6 @@
input_mask_expand_dim, 0)
return sematic_embeds * tgt_mask, decoder_out * tgt_mask
-
def cal_decoder_with_predictor(self, encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, hw_list=None,
clas_scale=1.0):
if hw_list is None:
@@ -294,11 +278,11 @@
enforce_sorted=False)
_, (h_n, _) = self.bias_encoder(hw_embed)
hw_embed = h_n.repeat(encoder_out.shape[0], 1, 1)
- pdb.set_trace()
+
decoder_outs = self.decoder(
encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, contextual_info=hw_embed, clas_scale=clas_scale
)
- pdb.set_trace()
+
decoder_out = decoder_outs[0]
decoder_out = torch.log_softmax(decoder_out, dim=-1)
return decoder_out, ys_pad_lens
@@ -363,14 +347,11 @@
clas_scale=kwargs.get("clas_scale", 1.0))
decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
- pdb.set_trace()
results = []
b, n, d = decoder_out.size()
- pdb.set_trace()
for i in range(b):
x = encoder_out[i, :encoder_out_lens[i], :]
am_scores = decoder_out[i, :pre_token_length[i], :]
- pdb.set_trace()
if self.beam_search is not None:
nbest_hyps = self.beam_search(
x=x, am_scores=am_scores, maxlenratio=kwargs.get("maxlenratio", 0.0),
@@ -423,7 +404,6 @@
results.append(result_i)
return results, meta_data
-
def generate_hotwords_list(self, hotword_list_or_file, tokenizer=None, frontend=None):
def load_seg_dict(seg_dict_file):
@@ -526,3 +506,12 @@
hotword_list = None
return hotword_list

+ def export(
+ self,
+ **kwargs,
+ ):
+ if 'max_seq_len' not in kwargs:
+ kwargs['max_seq_len'] = 512
+ from .export_meta import export_rebuild_model
+ models = export_rebuild_model(model=self, **kwargs)
+ return models
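Note on the export() hook appended at the end of the file: it only
defaults max_seq_len to 512 and delegates to export_rebuild_model from
the sibling export_meta module. A hypothetical call site (everything
around the call is assumed; only the max_seq_len keyword and the return
value come from this patch):

    # model: an already-constructed ContextualParaformer instance
    exported = model.export(max_seq_len=512)  # 512 is also the default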
--
Gitblit v1.9.1