From 5f088a67cd1b18a8260746971f32a6569e0cf2c6 Mon Sep 17 00:00:00 2001
From: haoneng.lhn <haoneng.lhn@alibaba-inc.com>
Date: Wed, 13 Sep 2023 20:02:54 +0800
Subject: [PATCH] Add optimized inference code for Paraformer online
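
Add a per-layer attention key/value cache for streaming inference of the
online Paraformer model. The cache is stored under the new "opt" key of the
encoder/decoder caches, and two new parameters, encoder_chunk_look_back and
decoder_chunk_look_back, are threaded from the inference launcher through
SANMEncoder.forward_chunk and the encoder layer's new forward_chunk down to
MultiHeadedAttentionSANM.forward_chunk. When encoder_chunk_look_back > 0, the
left part of chunk_size is set to 0, because the cached key/value states of
previous chunks replace the overlapped left chunk. The decoder side only
stores decoder_chunk_look_back in its cache here; the decoder itself is not
part of this diff.

A minimal usage sketch (the callable below is an assumption for
illustration; only the param_dict keys are defined by this patch):

    import torch

    param_dict = {
        "cache": {},                   # populated by _prepare_cache on the first call
        "is_final": False,             # set True on the last chunk to reset the cache
        "chunk_size": [5, 10, 5],      # [left, middle, right]; left is zeroed
                                       # automatically when encoder_chunk_look_back > 0
        "encoder_chunk_look_back": 4,  # encoder attends to K/V of 4 previous chunks
        "decoder_chunk_look_back": 1,  # decoder look-back, threaded the same way
    }

    speech = torch.randn(1, 9600)      # one stride: chunk_size[1] * 960 samples
    # speech2text is assumed to be the callable returned by the launcher:
    # results = speech2text(raw_inputs=speech, param_dict=param_dict)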
---
funasr/modules/attention.py | 32 ++++++++
funasr/models/encoder/sanm_encoder.py | 118 +++++++++++++++++++++++-----
funasr/bin/asr_inference_launch.py | 65 +++++++++++++--
3 files changed, 182 insertions(+), 33 deletions(-)
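Review note: the core of the change is the look-back key/value cache in
MultiHeadedAttentionSANM.forward_chunk. The self-contained sketch below
reproduces just that caching rule outside the model so the retained shapes
are easy to check; the helper name and tensor sizes are illustrative and not
part of the patch:

    import torch

    def update_lookback_cache(cache, k_h, v_h, chunk_size, look_back):
        # Mirror of forward_chunk: prepend cached K/V to the current chunk,
        # then retain only the last look_back * chunk_size[1] frames.
        if cache is not None:
            k_h = torch.cat((cache["k"], k_h), dim=2)  # (#batch, head, time, d_k)
            v_h = torch.cat((cache["v"], v_h), dim=2)
        keep = look_back * chunk_size[1]
        cache = {"k": k_h[:, :, -keep:, :], "v": v_h[:, :, -keep:, :]}
        return cache, k_h, v_h

    cache = None
    for step in range(3):  # three middle chunks of 10 frames each
        k_h = torch.randn(1, 4, 10, 64)  # 4 heads, d_k = 64
        v_h = torch.randn(1, 4, 10, 64)
        cache, k_full, v_full = update_lookback_cache(
            cache, k_h, v_h, [5, 10, 5], look_back=2)
    assert cache["k"].shape[2] == 20  # at most look_back * chunk_size[1] frames kept
    assert k_full.shape[2] == 30      # third chunk attends over 2 cached chunks + itself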
diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
index 0c564be..d5d1873 100644
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -840,37 +840,72 @@
data = yaml.load(f, Loader=yaml.Loader)
return data
- def _prepare_cache(cache: dict = {}, chunk_size=[5, 10, 5], batch_size=1):
+ def _prepare_cache(cache: dict = {}, chunk_size=[5, 10, 5], encoder_chunk_look_back=0,
+ decoder_chunk_look_back=0, batch_size=1):
if len(cache) > 0:
return cache
config = _read_yaml(asr_train_config)
enc_output_size = config["encoder_conf"]["output_size"]
feats_dims = config["frontend_conf"]["n_mels"] * config["frontend_conf"]["lfr_m"]
cache_en = {"start_idx": 0, "cif_hidden": torch.zeros((batch_size, 1, enc_output_size)),
- "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size, "last_chunk": False,
+ "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size,
+ "encoder_chunk_look_back": encoder_chunk_look_back, "last_chunk": False, "opt": None,
"feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
cache["encoder"] = cache_en
- cache_de = {"decode_fsmn": None}
+ cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None}
cache["decoder"] = cache_de
return cache
- def _cache_reset(cache: dict = {}, chunk_size=[5, 10, 5], batch_size=1):
+ def _cache_reset(cache: dict = {}, chunk_size=[5, 10, 5], encoder_chunk_look_back=0,
+ decoder_chunk_look_back=0, batch_size=1):
if len(cache) > 0:
config = _read_yaml(asr_train_config)
enc_output_size = config["encoder_conf"]["output_size"]
feats_dims = config["frontend_conf"]["n_mels"] * config["frontend_conf"]["lfr_m"]
cache_en = {"start_idx": 0, "cif_hidden": torch.zeros((batch_size, 1, enc_output_size)),
- "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size, "last_chunk": False,
- "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)),
- "tail_chunk": False}
+ "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size,
+ "encoder_chunk_look_back": encoder_chunk_look_back, "last_chunk": False, "opt": None,
+ "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
cache["encoder"] = cache_en
- cache_de = {"decode_fsmn": None}
+ cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None}
cache["decoder"] = cache_de
return cache
def _forward(
data_path_and_name_and_type,
@@ -899,12 +934,20 @@
is_final = False
cache = {}
chunk_size = [5, 10, 5]
+ encoder_chunk_look_back = 0
+ decoder_chunk_look_back = 0
if param_dict is not None and "cache" in param_dict:
cache = param_dict["cache"]
if param_dict is not None and "is_final" in param_dict:
is_final = param_dict["is_final"]
if param_dict is not None and "chunk_size" in param_dict:
chunk_size = param_dict["chunk_size"]
+ if param_dict is not None and "encoder_chunk_look_back" in param_dict:
+ encoder_chunk_look_back = param_dict["encoder_chunk_look_back"]
+ if encoder_chunk_look_back > 0:
+ chunk_size[0] = 0  # cached key/values replace the overlapped left chunk
+ if param_dict is not None and "decoder_chunk_look_back" in param_dict:
+ decoder_chunk_look_back = param_dict["decoder_chunk_look_back"]
# 7. Start for-loop
# FIXME(kamo): The output format should be discussed further
@@ -916,7 +959,8 @@
sample_offset = 0
speech_length = raw_inputs.shape[1]
stride_size = chunk_size[1] * 960
- cache = _prepare_cache(cache, chunk_size=chunk_size, batch_size=1)
+ cache = _prepare_cache(cache, chunk_size=chunk_size, batch_size=1,
+ encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
final_result = ""
for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
if sample_offset + stride_size >= speech_length - 1:
@@ -937,7 +981,8 @@
asr_result_list.append(item)
if is_final:
- cache = _cache_reset(cache, chunk_size=chunk_size, batch_size=1)
+ cache = _cache_reset(cache, chunk_size=chunk_size, batch_size=1,
+ encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
return asr_result_list
return _forward
diff --git a/funasr/models/encoder/sanm_encoder.py b/funasr/models/encoder/sanm_encoder.py
index 9e27d4a..ac4240c 100644
--- a/funasr/models/encoder/sanm_encoder.py
+++ b/funasr/models/encoder/sanm_encoder.py
@@ -114,8 +114,44 @@
if not self.normalize_before:
x = self.norm2(x)
-
return x, mask, cache, mask_shfit_chunk, mask_att_chunk_encoder
+
+ def forward_chunk(self, x, cache=None, chunk_size=None, look_back=0):
+ """Compute encoded features for one streaming chunk.
+
+ Args:
+ x (torch.Tensor): Input chunk tensor (#batch, time, size).
+ cache (dict): Key/value cache of this layer's attention, carried over from the previous chunk.
+ chunk_size (list): Chunk sizes [left, middle, right].
+ look_back (int): Number of previous chunks kept in the attention cache.
+
+ Returns:
+ torch.Tensor: Output tensor (#batch, time, size).
+ dict: Updated attention cache.
+
+ """
+
+ residual = x
+ if self.normalize_before:
+ x = self.norm1(x)
+
+ if self.in_size == self.size:
+ attn, cache = self.self_attn.forward_chunk(x, cache, chunk_size, look_back)
+ x = residual + attn
+ else:
+ x, cache = self.self_attn.forward_chunk(x, cache, chunk_size, look_back)
+
+ if not self.normalize_before:
+ x = self.norm1(x)
+
+ residual = x
+ if self.normalize_before:
+ x = self.norm2(x)
+ x = residual + self.feed_forward(x)
+ if not self.normalize_before:
+ x = self.norm2(x)
+
+ return x, cache
+
class SANMEncoder(AbsEncoder):
"""
@@ -837,11 +873,56 @@
cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
return overlap_feats
def forward_chunk(self,
xs_pad: torch.Tensor,
ilens: torch.Tensor,
cache: dict = None,
- ctc: CTC = None,
):
xs_pad *= self.output_size() ** 0.5
if self.embed is None:
@@ -852,34 +933,25 @@
xs_pad = to_device(cache["feats"], device=xs_pad.device)
else:
xs_pad = self._add_overlap_chunk(xs_pad, cache)
- encoder_outs = self.encoders0(xs_pad, None, None, None, None)
- xs_pad, masks = encoder_outs[0], encoder_outs[1]
- intermediate_outs = []
- if len(self.interctc_layer_idx) == 0:
- encoder_outs = self.encoders(xs_pad, None, None, None, None)
- xs_pad, masks = encoder_outs[0], encoder_outs[1]
+ if cache["opt"] is None:
+ cache_layer_num = len(self.encoders0) + len(self.encoders)
+ new_cache = [None] * cache_layer_num  # one attention cache slot per encoder layer
else:
- for layer_idx, encoder_layer in enumerate(self.encoders):
- encoder_outs = encoder_layer(xs_pad, None, None, None, None)
- xs_pad, masks = encoder_outs[0], encoder_outs[1]
- if layer_idx + 1 in self.interctc_layer_idx:
- encoder_out = xs_pad
+ new_cache = cache["opt"]
- # intermediate outputs are also normalized
- if self.normalize_before:
- encoder_out = self.after_norm(encoder_out)
+ for layer_idx, encoder_layer in enumerate(self.encoders0):
+ encoder_outs = encoder_layer.forward_chunk(xs_pad, new_cache[layer_idx], cache["chunk_size"], cache["encoder_chunk_look_back"])
+ xs_pad, new_cache[layer_idx] = encoder_outs[0], encoder_outs[1]
- intermediate_outs.append((layer_idx + 1, encoder_out))
-
- if self.interctc_use_conditioning:
- ctc_out = ctc.softmax(encoder_out)
- xs_pad = xs_pad + self.conditioning_layer(ctc_out)
+ for layer_idx, encoder_layer in enumerate(self.encoders):
+ encoder_outs = encoder_layer.forward_chunk(xs_pad, new_cache[layer_idx+len(self.encoders0)], cache["chunk_size"], cache["encoder_chunk_look_back"])
+ xs_pad, new_cache[layer_idx + len(self.encoders0)] = encoder_outs[0], encoder_outs[1]
if self.normalize_before:
xs_pad = self.after_norm(xs_pad)
+ if cache["encoder_chunk_look_back"] > 0:
+ cache["opt"] = new_cache
- if len(intermediate_outs) > 0:
- return (xs_pad, intermediate_outs), None, None
return xs_pad, ilens, None
def gen_tf2torch_map_dict(self):
diff --git a/funasr/modules/attention.py b/funasr/modules/attention.py
index ab59493..f5430e1 100644
--- a/funasr/modules/attention.py
+++ b/funasr/modules/attention.py
@@ -456,6 +456,38 @@
att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
return att_outs + fsmn_memory
+ def forward_chunk(self, x, cache=None, chunk_size=None, look_back=0):
+ """Compute scaled dot product attention for one streaming chunk.
+
+ Args:
+ x (torch.Tensor): Input chunk tensor (#batch, time, size), used as query, key and value.
+ cache (dict): Key/value cache {"k", "v"} from previous chunks (#batch, head, time, d_k).
+ chunk_size (list): Chunk sizes [left, middle, right]; chunk_size[1] is the middle chunk length.
+ look_back (int): Number of previous chunks whose key/value states are retained.
+
+ Returns:
+ torch.Tensor: Output tensor (#batch, time, d_model).
+ dict: Updated key/value cache.
+
+ """
+ q_h, k_h, v_h, v = self.forward_qkv(x)
+ if chunk_size is not None and look_back > 0:
+ if cache is not None:
+ k_h = torch.cat((cache["k"], k_h), dim=2)
+ v_h = torch.cat((cache["v"], v_h), dim=2)
+ cache["k"] = k_h[:, :, -(look_back * chunk_size[1]):, :]
+ cache["v"] = v_h[:, :, -(look_back * chunk_size[1]):, :]
+ else:
+ cache_tmp = {"k": k_h[:, :, -(look_back * chunk_size[1]):, :],
+ "v": v_h[:, :, -(look_back * chunk_size[1]):, :]}
+ cache = cache_tmp
+ fsmn_memory = self.forward_fsmn(v, None)
+ q_h = q_h * self.d_k ** (-0.5)
+ scores = torch.matmul(q_h, k_h.transpose(-2, -1))
+ att_outs = self.forward_attention(v_h, scores, None)
+ return att_outs + fsmn_memory, cache
+
+
class MultiHeadedAttentionSANMwithMask(MultiHeadedAttentionSANM):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
--
Gitblit v1.9.1