From bf4b3ef9cb95acaa2b92b98f236c4f3228cdbc2d Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Thu, 21 Sep 2023 16:30:43 +0800
Subject: [PATCH] Merge pull request #976 from alibaba-damo-academy/dev_lhn
---
funasr/models/decoder/sanm_decoder.py | 146 ++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 121 insertions(+), 25 deletions(-)
diff --git a/funasr/models/decoder/sanm_decoder.py b/funasr/models/decoder/sanm_decoder.py
index 0117430..ff35e46 100644
--- a/funasr/models/decoder/sanm_decoder.py
+++ b/funasr/models/decoder/sanm_decoder.py
@@ -7,7 +7,6 @@
from funasr.modules.streaming_utils import utils as myutils
from funasr.models.decoder.transformer_decoder import BaseTransformerDecoder
-from typeguard import check_argument_types
from funasr.modules.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
from funasr.modules.embedding import PositionalEncoding
@@ -94,6 +93,46 @@
if self.self_attn:
if self.normalize_before:
tgt = self.norm2(tgt)
+ x, _ = self.self_attn(tgt, tgt_mask)
+ x = residual + self.dropout(x)
+
+ if self.src_attn is not None:
+ residual = x
+ if self.normalize_before:
+ x = self.norm3(x)
+
+ x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
+
+ return x, tgt_mask, memory, memory_mask, cache
+
+ def forward_one_step(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
+ """Compute decoded features.
+
+ Args:
+ tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
+ tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).
+ memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
+ memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
+ cache (List[torch.Tensor]): List of cached tensors.
+ Each tensor shape should be (#batch, maxlen_out - 1, size).
+
+ Returns:
+ torch.Tensor: Output tensor (#batch, maxlen_out, size).
+ torch.Tensor: Mask for output tensor (#batch, maxlen_out).
+ torch.Tensor: Encoded memory (#batch, maxlen_in, size).
+ torch.Tensor: Encoded memory mask (#batch, maxlen_in).
+ List[torch.Tensor]: Updated FSMN self-attention cache.
+
+ """
+ # tgt = self.dropout(tgt)
+ residual = tgt
+ if self.normalize_before:
+ tgt = self.norm1(tgt)
+ tgt = self.feed_forward(tgt)
+
+ x = tgt
+ if self.self_attn:
+ if self.normalize_before:
+ tgt = self.norm2(tgt)
if self.training:
cache = None
x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
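The hunk above splits the old cached code path in two: `forward` now runs cache-free for the full-sequence/training case, while `forward_one_step` threads the FSMN self-attention cache through each incremental decoding step. A minimal usage sketch, assuming the enclosing layer class is FunASR's DecoderLayerSANM and that `layer`, `tgt`, `tgt_mask`, `memory`, `memory_mask`, and `num_steps` are provided by the caller:

    # Sketch only -- `layer` is a hypothetical DecoderLayerSANM instance.
    # Full-sequence path: no cache is carried between calls.
    x, tgt_mask, memory, memory_mask, _ = layer(tgt, tgt_mask, memory, memory_mask)

    # Incremental path: the cache returned by each step is fed back in.
    cache = None
    for _ in range(num_steps):
        x, tgt_mask, memory, memory_mask, cache = layer.forward_one_step(
            tgt, tgt_mask, memory, memory_mask, cache=cache
        )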
@@ -109,10 +148,50 @@
return x, tgt_mask, memory, memory_mask, cache
+ def forward_chunk(self, tgt, memory, fsmn_cache=None, opt_cache=None, chunk_size=None, look_back=0):
+ """Compute decoded features.
+
+ Args:
+ tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
+ memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
+ fsmn_cache (torch.Tensor): Cache for the FSMN self-attention block.
+ opt_cache (torch.Tensor): Cache for the chunk-wise cross-attention.
+ chunk_size (int): Number of encoder frames per streaming chunk.
+ look_back (int): Number of previous chunks the cross-attention may attend to.
+
+ Returns:
+ torch.Tensor: Output tensor (#batch, maxlen_out, size).
+ torch.Tensor: Encoded memory (#batch, maxlen_in, size).
+ torch.Tensor: Updated fsmn_cache.
+ torch.Tensor: Updated opt_cache.
+
+ """
+ residual = tgt
+ if self.normalize_before:
+ tgt = self.norm1(tgt)
+ tgt = self.feed_forward(tgt)
+
+ x = tgt
+ if self.self_attn:
+ if self.normalize_before:
+ tgt = self.norm2(tgt)
+ x, fsmn_cache = self.self_attn(tgt, None, fsmn_cache)
+ x = residual + self.dropout(x)
+
+ if self.src_attn is not None:
+ residual = x
+ if self.normalize_before:
+ x = self.norm3(x)
+
+ x, opt_cache = self.src_attn.forward_chunk(x, memory, opt_cache, chunk_size, look_back)
+ x = residual + x
+
+ return x, memory, fsmn_cache, opt_cache
+
class FsmnDecoderSCAMAOpt(BaseTransformerDecoder):
"""
- author: Speech Lab, Alibaba Group, China
+ Author: Speech Lab of DAMO Academy, Alibaba Group
SCAMA: Streaming chunk-aware multihead attention for online end-to-end speech recognition
https://arxiv.org/abs/2006.01713
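A rough sketch of how the new per-layer `forward_chunk` would be driven during streaming decoding; the loop, variable names, and chunk source are assumptions, only the call signature comes from the patch:

    # Sketch only: stream encoder output chunk by chunk through one decoder layer.
    fsmn_cache, opt_cache = None, None
    for memory_chunk in encoder_chunks:  # hypothetical iterable of encoder outputs
        x, memory_chunk, fsmn_cache, opt_cache = layer.forward_chunk(
            x, memory_chunk,
            fsmn_cache=fsmn_cache,  # FSMN memory-block state carried across chunks
            opt_cache=opt_cache,    # cross-attention key/value state for look-back
            chunk_size=chunk_size,
            look_back=look_back,
        )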
@@ -142,7 +221,6 @@
tf2torch_tensor_name_prefix_tf: str = "seq2seq/decoder",
embed_tensor_name_prefix_tf: str = None,
):
- assert check_argument_types()
super().__init__(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
@@ -360,7 +438,7 @@
for i in range(self.att_layer_num):
decoder = self.decoders[i]
c = cache[i]
- x, tgt_mask, memory, memory_mask, c_ret = decoder(
+ x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_one_step(
x, tgt_mask, memory, memory_mask, cache=c
)
new_cache.append(c_ret)
@@ -370,13 +448,13 @@
j = i + self.att_layer_num
decoder = self.decoders2[i]
c = cache[j]
- x, tgt_mask, memory, memory_mask, c_ret = decoder(
+ x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_one_step(
x, tgt_mask, memory, memory_mask, cache=c
)
new_cache.append(c_ret)
for decoder in self.decoders3:
- x, tgt_mask, memory, memory_mask, _ = decoder(
+ x, tgt_mask, memory, memory_mask, _ = decoder.forward_one_step(
x, tgt_mask, memory, None, cache=None
)
@@ -773,7 +851,7 @@
class ParaformerSANMDecoder(BaseTransformerDecoder):
"""
- author: Speech Lab, Alibaba Group, China
+ Author: Speech Lab of DAMO Academy, Alibaba Group
Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition
https://arxiv.org/abs/2206.08317
"""
@@ -796,10 +874,14 @@
att_layer_num: int = 6,
kernel_size: int = 21,
sanm_shfit: int = 0,
+ lora_list: List[str] = None,
+ lora_rank: int = 8,
+ lora_alpha: int = 16,
+ lora_dropout: float = 0.1,
+ chunk_multiply_factor: tuple = (1,),
tf2torch_tensor_name_prefix_torch: str = "decoder",
tf2torch_tensor_name_prefix_tf: str = "seq2seq/decoder",
):
- assert check_argument_types()
super().__init__(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
@@ -849,7 +931,7 @@
attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=sanm_shfit
),
MultiHeadedAttentionCrossAtt(
- attention_heads, attention_dim, src_attention_dropout_rate
+ attention_heads, attention_dim, src_attention_dropout_rate, lora_list, lora_rank, lora_alpha, lora_dropout
),
PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
dropout_rate,
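The four new `lora_*` arguments are forwarded only to the cross-attention module; the actual adapter wiring lives inside MultiHeadedAttentionCrossAtt and is not shown in this patch. For orientation, a generic low-rank adapter over a single linear projection (an illustration of the technique, not FunASR's implementation):

    import math
    import torch.nn as nn

    class LoRALinear(nn.Module):
        """Generic LoRA sketch: y = Wx + (alpha / rank) * B(A(dropout(x)))."""

        def __init__(self, in_features, out_features, rank=8, alpha=16, dropout=0.1):
            super().__init__()
            self.base = nn.Linear(in_features, out_features)
            self.base.weight.requires_grad_(False)  # pretrained weight stays frozen
            self.lora_a = nn.Linear(in_features, rank, bias=False)   # down-projection
            self.lora_b = nn.Linear(rank, out_features, bias=False)  # up-projection
            nn.init.kaiming_uniform_(self.lora_a.weight, a=math.sqrt(5))
            nn.init.zeros_(self.lora_b.weight)  # adapter starts as a no-op
            self.dropout = nn.Dropout(dropout)
            self.scaling = alpha / rank

        def forward(self, x):
            return self.base(x) + self.scaling * self.lora_b(self.lora_a(self.dropout(x)))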
@@ -889,6 +971,7 @@
)
self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
+ self.chunk_multiply_factor = chunk_multiply_factor
def forward(
self,
@@ -896,6 +979,7 @@
hlens: torch.Tensor,
ys_in_pad: torch.Tensor,
ys_in_lens: torch.Tensor,
+ chunk_mask: torch.Tensor = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward decoder.
@@ -916,9 +1000,13 @@
"""
tgt = ys_in_pad
tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
-
+
memory = hs_pad
memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+ if chunk_mask is not None:
+ memory_mask = memory_mask * chunk_mask
+ if tgt_mask.size(1) != memory_mask.size(1):
+ memory_mask = torch.cat((memory_mask, memory_mask[:, -2:-1, :]), dim=1)
x = tgt
x, tgt_mask, memory, memory_mask, _ = self.decoders(
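For intuition, the `chunk_mask` handling added above can be exercised in isolation. The shapes and the triangular mask below are assumptions; only the two mask-combination lines mirror the patch:

    import torch

    B, T_out, T_in = 2, 10, 30
    memory_mask = torch.ones(B, 1, T_in)              # (#batch, 1, maxlen_in)
    chunk_mask = torch.tril(torch.ones(T_out, T_in))  # hypothetical streaming mask
    memory_mask = memory_mask * chunk_mask            # broadcasts to (#batch, T_out, T_in)

    tgt_rows = T_out + 1  # e.g. the target mask is one step longer
    if tgt_rows != memory_mask.size(1):
        # pad by repeating the second-to-last row, exactly as the patch does
        memory_mask = torch.cat((memory_mask, memory_mask[:, -2:-1, :]), dim=1)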
@@ -975,35 +1063,43 @@
cache_layer_num = len(self.decoders)
if self.decoders2 is not None:
cache_layer_num += len(self.decoders2)
- new_cache = [None] * cache_layer_num
+ fsmn_cache = [None] * cache_layer_num
else:
- new_cache = cache["decode_fsmn"]
+ fsmn_cache = cache["decode_fsmn"]
+
+ if cache["opt"] is None:
+ cache_layer_num = len(self.decoders)
+ opt_cache = [None] * cache_layer_num
+ else:
+ opt_cache = cache["opt"]
+
for i in range(self.att_layer_num):
decoder = self.decoders[i]
- x, tgt_mask, memory, memory_mask, c_ret = decoder(
- x, None, memory, None, cache=new_cache[i]
+ x, memory, fsmn_cache[i], opt_cache[i] = decoder.forward_chunk(
+ x, memory, fsmn_cache=fsmn_cache[i], opt_cache=opt_cache[i],
+ chunk_size=cache["chunk_size"], look_back=cache["decoder_chunk_look_back"]
)
- new_cache[i] = c_ret
if self.num_blocks - self.att_layer_num > 1:
for i in range(self.num_blocks - self.att_layer_num):
j = i + self.att_layer_num
decoder = self.decoders2[i]
- x, tgt_mask, memory, memory_mask, c_ret = decoder(
- x, None, memory, None, cache=new_cache[j]
+ x, memory, fsmn_cache[j], _ = decoder.forward_chunk(
+ x, memory, fsmn_cache=fsmn_cache[j]
)
- new_cache[j] = c_ret
for decoder in self.decoders3:
-
- x, tgt_mask, memory, memory_mask, _ = decoder(
- x, None, memory, None, cache=None
+ x, memory, _, _ = decoder.forward_chunk(
+ x, memory
)
if self.normalize_before:
x = self.after_norm(x)
if self.output_layer is not None:
x = self.output_layer(x)
- cache["decode_fsmn"] = new_cache
+
+ cache["decode_fsmn"] = fsmn_cache
+ if cache["decoder_chunk_look_back"] > 0 or cache["decoder_chunk_look_back"] == -1:
+ cache["opt"] = opt_cache
return x
def forward_one_step(
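The rewritten forward_chunk above consumes a cache dictionary rather than the old flat list. A minimal sketch of the expected keys, with names taken from the diff and initial values as assumptions:

    # Sketch of the streaming cache consumed above; per-layer lists are
    # allocated lazily on the first call.
    cache = {
        "decode_fsmn": None,           # becomes a list of FSMN caches, one per layer
        "opt": None,                   # becomes a list of cross-attention caches
        "chunk_size": 10,              # hypothetical encoder chunk size
        "decoder_chunk_look_back": 1,  # >0 or -1 keeps "opt" across calls; 0 discards it
    }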
@@ -1037,7 +1133,7 @@
for i in range(self.att_layer_num):
decoder = self.decoders[i]
c = cache[i]
- x, tgt_mask, memory, memory_mask, c_ret = decoder(
+ x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_one_step(
x, tgt_mask, memory, None, cache=c
)
new_cache.append(c_ret)
@@ -1047,14 +1143,14 @@
j = i + self.att_layer_num
decoder = self.decoders2[i]
c = cache[j]
- x, tgt_mask, memory, memory_mask, c_ret = decoder(
+ x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_one_step(
x, tgt_mask, memory, None, cache=c
)
new_cache.append(c_ret)
for decoder in self.decoders3:
- x, tgt_mask, memory, memory_mask, _ = decoder(
+ x, tgt_mask, memory, memory_mask, _ = decoder.forward_one_step(
x, tgt_mask, memory, None, cache=None
)
--
Gitblit v1.9.1