From e022acfdea54fd7a6aefb7a353cd5b10a39d3bfd Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 29 Feb 2024 17:23:30 +0800
Subject: [PATCH] v1.0.11
---
funasr/models/seaco_paraformer/model.py | 115 +++++++++++++++++++++++++--------------------------------
1 file changed, 50 insertions(+), 65 deletions(-)
diff --git a/funasr/models/seaco_paraformer/model.py b/funasr/models/seaco_paraformer/model.py
index 2de125a..21ad874 100644
--- a/funasr/models/seaco_paraformer/model.py
+++ b/funasr/models/seaco_paraformer/model.py
@@ -1,3 +1,8 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
import os
import re
import time
@@ -8,24 +13,22 @@
import tempfile
import requests
import numpy as np
-from typing import Dict
-from typing import List
-from typing import Tuple
-from typing import Union
-from typing import Optional
+from typing import Dict, Tuple
from contextlib import contextmanager
from distutils.version import LooseVersion
-from funasr.losses.label_smoothing_loss import (
- LabelSmoothingLoss, # noqa: H301
-)
-from funasr.models.paraformer.cif_predictor import mae_loss
-from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
-from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
-from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
-from funasr.metrics.compute_acc import th_accuracy
-from funasr.train_utils.device_funcs import force_gatherable
+from funasr.register import tables
+from funasr.utils import postprocess_utils
+from funasr.models.paraformer.model import Paraformer
+from funasr.utils.datadir_writer import DatadirWriter
from funasr.models.paraformer.search import Hypothesis
+from funasr.train_utils.device_funcs import force_gatherable
+from funasr.models.bicif_paraformer.model import BiCifParaformer
+from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
+from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
+from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
+from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
+from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
@@ -35,13 +38,6 @@
@contextmanager
def autocast(enabled=True):
yield
-from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
-from funasr.utils import postprocess_utils
-from funasr.utils.datadir_writer import DatadirWriter
-
-from funasr.models.paraformer.model import Paraformer
-from funasr.models.bicif_paraformer.model import BiCifParaformer
-from funasr.register import tables
@tables.register("model_classes", "SeacoParaformer")
@@ -68,7 +64,6 @@
# bias encoder
if self.bias_encoder_type == 'lstm':
- logging.warning("enable bias encoder sampling and contextual training")
self.bias_encoder = torch.nn.LSTM(self.inner_dim,
self.inner_dim,
2,
@@ -79,9 +74,8 @@
self.lstm_proj = torch.nn.Linear(self.inner_dim*2, self.inner_dim)
else:
self.lstm_proj = None
- self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
+ # self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
elif self.bias_encoder_type == 'mean':
- logging.warning("enable bias encoder sampling and contextual training")
self.bias_embed = torch.nn.Embedding(self.vocab_size, self.inner_dim)
else:
logging.error("Unsupport bias encoder type: {}".format(self.bias_encoder_type))
@@ -90,7 +84,7 @@
seaco_decoder = kwargs.get("seaco_decoder", None)
if seaco_decoder is not None:
seaco_decoder_conf = kwargs.get("seaco_decoder_conf")
- seaco_decoder_class = tables.decoder_classes.get(seaco_decoder.lower())
+ seaco_decoder_class = tables.decoder_classes.get(seaco_decoder)
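+ # look up the decoder class under its exact registered name (the registry lookup is case-sensitive)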
self.seaco_decoder = seaco_decoder_class(
vocab_size=self.vocab_size,
encoder_output_size=self.inner_dim,
@@ -214,7 +208,7 @@
ys_pad_lens,
hw_list,
nfilter=50,
- seaco_weight=1.0):
+ seaco_weight=1.0):
# decoder forward
decoder_out, decoder_hidden, _ = self.decoder(encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, return_hidden=True, return_both=True)
decoder_pred = torch.log_softmax(decoder_out, dim=-1)
@@ -229,12 +223,8 @@
# ASF Core
if nfilter > 0 and nfilter < num_hot_word:
- for dec in self.seaco_decoder.decoders:
- dec.reserve_attn = True
- # cif_attended, _ = self.decoder2(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
- dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
- # cif_filter = torch.topk(self.decoder2.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1], min(nfilter, num_hot_word-1))[1].tolist()
- hotword_scores = self.seaco_decoder.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1]
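+ # score each hotword by the attention mass it receives from the biasing decoder, summed over heads and decoding steps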
+ hotword_scores = self.seaco_decoder.forward_asf6(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
+ hotword_scores = hotword_scores[0].sum(0).sum(0)
# hotword_scores /= torch.sqrt(torch.tensor(hw_lengths)[:-1].float()).to(hotword_scores.device)
dec_filter = torch.topk(hotword_scores, min(nfilter, num_hot_word-1))[1].tolist()
add_filter = dec_filter
@@ -245,9 +235,6 @@
contextual_info = selected.squeeze(0).repeat(encoder_out.shape[0], 1, 1).to(encoder_out.device)
num_hot_word = contextual_info.shape[1]
_contextual_length = torch.Tensor([num_hot_word]).int().repeat(encoder_out.shape[0]).to(encoder_out.device)
- for dec in self.seaco_decoder.decoders:
- dec.attn_mat = []
- dec.reserve_attn = False
# SeACo Core
cif_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
@@ -256,10 +243,9 @@
dha_output = self.hotword_output_layer(merged) # remove the last token in loss calculation
dha_pred = torch.log_softmax(dha_output, dim=-1)
- # import pdb; pdb.set_trace()
def _merge_res(dec_output, dha_output):
lmbd = torch.Tensor([seaco_weight] * dha_output.shape[0])
- dha_ids = dha_output.max(-1)[-1][0]
+ dha_ids = dha_output.max(-1)[-1]  # [0]
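+ # 8377 is assumed to be the <no-bias> token id: positions predicted as no-bias fall back to the plain decoder scores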
dha_mask = (dha_ids == 8377).int().unsqueeze(-1)
a = (1 - lmbd) / lmbd
b = 1 / lmbd
@@ -269,6 +255,7 @@
logits = dec_output * dha_mask + dha_output[:,:,:] * (1-dha_mask)
return logits
merged_pred = _merge_res(decoder_pred, dha_pred)
+ # import pdb; pdb.set_trace()
return merged_pred
else:
return decoder_pred
@@ -278,6 +265,8 @@
hotword_lengths):
if self.bias_encoder_type != 'lstm':
logging.error("Unsupported bias encoder type")
+
+ '''
hw_embed = self.decoder.embed(hotword_pad)
hw_embed, (_, _) = self.bias_encoder(hw_embed)
if self.lstm_proj is not None:
@@ -285,28 +274,22 @@
_ind = np.arange(0, hw_embed.shape[0]).tolist()
selected = hw_embed[_ind, [i-1 for i in hotword_lengths.detach().cpu().tolist()]]
return selected
+ '''
- '''
- def calc_predictor(self, encoder_out, encoder_out_lens):
- encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
- encoder_out.device)
- pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index, pre_token_length2 = self.predictor(encoder_out,
- None,
- encoder_out_mask,
- ignore_id=self.ignore_id)
- return pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index
-
-
- def calc_predictor_timestamp(self, encoder_out, encoder_out_lens, token_num):
- encoder_out_mask = (~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]).to(
- encoder_out.device)
- ds_alphas, ds_cif_peak, us_alphas, us_peaks = self.predictor.get_upsample_timestamp(encoder_out,
- encoder_out_mask,
- token_num)
- return ds_alphas, ds_cif_peak, us_alphas, us_peaks
- '''
+ # hw_embed = self.sac_embedding(hotword_pad)
+ hw_embed = self.decoder.embed(hotword_pad)
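+ # pack the padded hotword embeddings so the LSTM only sees each hotword's valid timesteps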
+ hw_embed = torch.nn.utils.rnn.pack_padded_sequence(hw_embed, hotword_lengths.cpu().type(torch.int64), batch_first=True, enforce_sorted=False)
+ packed_rnn_output, _ = self.bias_encoder(hw_embed)
+ rnn_output = torch.nn.utils.rnn.pad_packed_sequence(packed_rnn_output, batch_first=True)[0]
+ if self.lstm_proj is not None:
+ hw_hidden = self.lstm_proj(rnn_output)
+ else:
+ hw_hidden = rnn_output
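+ # take the hidden state at each hotword's last valid position as its bias embedding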
+ _ind = np.arange(0, hw_hidden.shape[0]).tolist()
+ selected = hw_hidden[_ind, [i-1 for i in hotword_lengths.detach().cpu().tolist()]]
+ return selected
- def generate(self,
+ def inference(self,
data_in,
data_lengths=None,
key: list = None,
@@ -327,7 +310,7 @@
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
@@ -337,8 +320,9 @@
meta_data[
"batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
- speech.to(device=kwargs["device"]), speech_lengths.to(device=kwargs["device"])
-
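+ # Tensor.to is not in-place, so the returned tensors must be kept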
+ speech = speech.to(device=kwargs["device"])
+ speech_lengths = speech_lengths.to(device=kwargs["device"])
+
# hotword
self.hotword_list = self.generate_hotwords_list(kwargs.get("hotword", None), tokenizer=tokenizer, frontend=frontend)
@@ -388,9 +372,11 @@
nbest_hyps = [Hypothesis(yseq=yseq, score=score)]
for nbest_idx, hyp in enumerate(nbest_hyps):
ibest_writer = None
- if ibest_writer is None and kwargs.get("output_dir") is not None:
- writer = DatadirWriter(kwargs.get("output_dir"))
- ibest_writer = writer[f"{nbest_idx + 1}best_recog"]
+ if kwargs.get("output_dir") is not None:
+ if not hasattr(self, "writer"):
+ self.writer = DatadirWriter(kwargs.get("output_dir"))
+ ibest_writer = self.writer[f"{nbest_idx + 1}best_recog"]
+
# remove sos/eos and get results
last_pos = -1
if isinstance(hyp.yseq, list):
@@ -416,12 +402,11 @@
token, timestamp)
result_i = {"key": key[i], "text": text_postprocessed,
- "timestamp": time_stamp_postprocessed,
+ "timestamp": time_stamp_postprocessed
}
if ibest_writer is not None:
ibest_writer["token"][key[i]] = " ".join(token)
- # ibest_writer["text"][key[i]] = text
ibest_writer["timestamp"][key[i]] = time_stamp_postprocessed
ibest_writer["text"][key[i]] = text_postprocessed
else:
--
Gitblit v1.9.1