From f6308928635fa9f472e34911b2b1d0254ee18152 Mon Sep 17 00:00:00 2001
From: haoneng.lhn <haoneng.lhn@alibaba-inc.com>
Date: Wed, 24 May 2023 20:23:27 +0800
Subject: [PATCH] sanm_decoder: call forward_chunk in chunk-wise decoding and add chunk_mask to ParaformerSANMDecoder.forward
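
Call forward_chunk() explicitly in the chunk-wise decoding paths of
FsmnDecoderSCAMAOpt and ParaformerSANMDecoder instead of going through
__call__, drop the stale self-attention cache handling in
DecoderLayerSANM.forward, and let ParaformerSANMDecoder.forward accept
an optional chunk_mask that is multiplied into the encoder memory mask.

As a rough illustration of how a caller might build such a mask (the
helper name build_chunk_mask, the chunk size, and the exact mask shape
are assumptions for this sketch, not part of this patch):

    import torch

    def build_chunk_mask(t_len: int, chunk_size: int) -> torch.Tensor:
        # (1, t_len, t_len) mask: frame i may attend to frames in its
        # own chunk and in all earlier chunks
        idx = torch.arange(t_len)
        visible = (idx[None, :] // chunk_size) <= (idx[:, None] // chunk_size)
        return visible[None].to(torch.float32)

    # memory_mask has shape (B, 1, T); broadcasting it against this
    # (1, T, T) chunk mask yields a (B, T, T) chunked memory mask.
    chunk_mask = build_chunk_mask(t_len=32, chunk_size=8)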
---
 funasr/models/decoder/sanm_decoder.py | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/funasr/models/decoder/sanm_decoder.py b/funasr/models/decoder/sanm_decoder.py
index a36d95e..508eb73 100644
--- a/funasr/models/decoder/sanm_decoder.py
+++ b/funasr/models/decoder/sanm_decoder.py
@@ -94,7 +94,7 @@
         if self.self_attn:
             if self.normalize_before:
                 tgt = self.norm2(tgt)
-            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+            x, _ = self.self_attn(tgt, tgt_mask)
             x = residual + self.dropout(x)
 
         if self.src_attn is not None:
@@ -151,7 +151,7 @@
 class FsmnDecoderSCAMAOpt(BaseTransformerDecoder):
     """
-    author: Speech Lab, Alibaba Group, China
+    Author: Speech Lab of DAMO Academy, Alibaba Group
     SCAMA: Streaming chunk-aware multihead attention for online end-to-end speech recognition
     https://arxiv.org/abs/2006.01713
@@ -399,7 +399,7 @@
         for i in range(self.att_layer_num):
             decoder = self.decoders[i]
             c = cache[i]
-            x, tgt_mask, memory, memory_mask, c_ret = decoder(
+            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                 x, tgt_mask, memory, memory_mask, cache=c
             )
             new_cache.append(c_ret)
@@ -409,13 +409,13 @@
             j = i + self.att_layer_num
             decoder = self.decoders2[i]
             c = cache[j]
-            x, tgt_mask, memory, memory_mask, c_ret = decoder(
+            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                 x, tgt_mask, memory, memory_mask, cache=c
             )
             new_cache.append(c_ret)
 
         for decoder in self.decoders3:
-            x, tgt_mask, memory, memory_mask, _ = decoder(
+            x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
                 x, tgt_mask, memory, None, cache=None
             )
@@ -812,7 +812,7 @@
 class ParaformerSANMDecoder(BaseTransformerDecoder):
     """
-    author: Speech Lab, Alibaba Group, China
+    Author: Speech Lab of DAMO Academy, Alibaba Group
     Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition
     https://arxiv.org/abs/2206.08317
     """
@@ -935,6 +935,7 @@
         hs_pad: torch.Tensor,
         hlens: torch.Tensor,
         ys_in_pad: torch.Tensor,
         ys_in_lens: torch.Tensor,
+        chunk_mask: torch.Tensor = None,
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         """Forward decoder.
@@ -955,6 +956,12 @@
         """
         tgt = ys_in_pad
         tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
 
         memory = hs_pad
         memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+        if chunk_mask is not None:
+            # restrict the encoder memory mask to the streaming chunk layout;
+            # pad with the second-to-last row if the lengths then disagree
+            memory_mask = memory_mask * chunk_mask
+            if tgt_mask.size(1) != memory_mask.size(1):
+                memory_mask = torch.cat((memory_mask, memory_mask[:, -2:-1, :]), dim=1)
@@ -1076,7 +1081,7 @@
         for i in range(self.att_layer_num):
             decoder = self.decoders[i]
             c = cache[i]
-            x, tgt_mask, memory, memory_mask, c_ret = decoder(
+            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                 x, tgt_mask, memory, None, cache=c
             )
             new_cache.append(c_ret)
@@ -1086,14 +1091,14 @@
             j = i + self.att_layer_num
             decoder = self.decoders2[i]
             c = cache[j]
-            x, tgt_mask, memory, memory_mask, c_ret = decoder(
+            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                 x, tgt_mask, memory, None, cache=c
             )
             new_cache.append(c_ret)
 
         for decoder in self.decoders3:
-            x, tgt_mask, memory, memory_mask, _ = decoder(
+            x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
                 x, tgt_mask, memory, None, cache=None
             )
--
Gitblit v1.9.1