From 3f8294b9d7deaa0cbdb0b2ef6f3802d46ae133a9 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Wed, 25 Dec 2024 17:16:11 +0800
Subject: [PATCH] Revert "shfit to shift (#2266)" (#2336)

---
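Notes (below the cut line, so not part of the commit message): this patch
reverts the #2266 rename of the keyword argument "sanm_shfit" to
"sanm_shift". Presumably, though the patch itself does not say so, the rename
broke backward compatibility: saved configs and call sites for pretrained
SCAMA models pass the historical (misspelled) keyword, and Python rejects
unknown keyword arguments. A minimal, self-contained sketch of that failure
mode follows; the function names are hypothetical, not FunASR code:

    # Historical signature: existing configs pass the misspelled keyword.
    def build_old(kernel_size: int = 21, sanm_shfit: int = None) -> int:
        if sanm_shfit is None:
            sanm_shfit = (kernel_size - 1) // 2  # same default as in the hunks below
        return sanm_shfit

    # Renamed signature as introduced by #2266.
    def build_renamed(kernel_size: int = 21, sanm_shift: int = None) -> int:
        if sanm_shift is None:
            sanm_shift = (kernel_size - 1) // 2
        return sanm_shift

    conf = {"sanm_shfit": 10}        # keyword as serialized in an existing config
    print(build_old(**conf))         # -> 10
    try:
        build_renamed(**conf)        # rejected after the rename
    except TypeError as e:
        print(e)                     # unexpected keyword argument 'sanm_shfit'
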
funasr/models/scama/decoder.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/funasr/models/scama/decoder.py b/funasr/models/scama/decoder.py
index f457b75..31b2357 100644
--- a/funasr/models/scama/decoder.py
+++ b/funasr/models/scama/decoder.py
@@ -226,7 +226,7 @@
concat_after: bool = False,
att_layer_num: int = 6,
kernel_size: int = 21,
- sanm_shift: int = None,
+ sanm_shfit: int = None,
concat_embeds: bool = False,
attention_dim: int = None,
tf2torch_tensor_name_prefix_torch: str = "decoder",
@@ -271,14 +271,14 @@
self.att_layer_num = att_layer_num
self.num_blocks = num_blocks

- if sanm_shift is None:
- sanm_shift = (kernel_size - 1) // 2
+ if sanm_shfit is None:
+ sanm_shfit = (kernel_size - 1) // 2
self.decoders = repeat(
att_layer_num,
lambda lnum: DecoderLayerSANM(
attention_dim,
MultiHeadedAttentionSANMDecoder(
- attention_dim, self_attention_dropout_rate, kernel_size, sanm_shift=sanm_shift
+ attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=sanm_shfit
),
MultiHeadedAttentionCrossAtt(
attention_heads,
@@ -303,7 +303,7 @@
attention_dim,
self_attention_dropout_rate,
kernel_size,
- sanm_shift=sanm_shift,
+ sanm_shfit=sanm_shfit,
),
None,
PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
--
Gitblit v1.9.1