From 3f8294b9d7deaa0cbdb0b2ef6f3802d46ae133a9 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Wed, 25 Dec 2024 17:16:11 +0800
Subject: [PATCH] Revert "shfit to shift (#2266)" (#2336)
---
funasr/models/sense_voice/model.py | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index 0e3ef5f..70cd02e 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -95,7 +95,7 @@
n_feat,
dropout_rate,
kernel_size,
- sanm_shift=0,
+ sanm_shfit=0,
lora_list=None,
lora_rank=8,
lora_alpha=16,
@@ -121,17 +121,17 @@
)
# padding
left_padding = (kernel_size - 1) // 2
- if sanm_shift > 0:
- left_padding = left_padding + sanm_shift
+ if sanm_shfit > 0:
+ left_padding = left_padding + sanm_shfit
right_padding = kernel_size - 1 - left_padding
self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)
- def forward_fsmn(self, inputs, mask, mask_shift_chunk=None):
+ def forward_fsmn(self, inputs, mask, mask_shfit_chunk=None):
b, t, d = inputs.size()
if mask is not None:
mask = torch.reshape(mask, (b, -1, 1))
- if mask_shift_chunk is not None:
- mask = mask * mask_shift_chunk
+ if mask_shfit_chunk is not None:
+ mask = mask * mask_shfit_chunk
inputs = inputs * mask
x = inputs.transpose(1, 2)
@@ -211,7 +211,7 @@
return self.linear_out(x) # (batch, time1, d_model)
- def forward(self, x, mask, mask_shift_chunk=None, mask_att_chunk_encoder=None):
+ def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
"""Compute scaled dot product attention.
Args:
@@ -226,7 +226,7 @@
"""
q_h, k_h, v_h, v = self.forward_qkv(x)
- fsmn_memory = self.forward_fsmn(v, mask, mask_shift_chunk)
+ fsmn_memory = self.forward_fsmn(v, mask, mask_shfit_chunk)
q_h = q_h * self.d_k ** (-0.5)
scores = torch.matmul(q_h, k_h.transpose(-2, -1))
att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
@@ -326,7 +326,7 @@
self.stochastic_depth_rate = stochastic_depth_rate
self.dropout_rate = dropout_rate
- def forward(self, x, mask, cache=None, mask_shift_chunk=None, mask_att_chunk_encoder=None):
+ def forward(self, x, mask, cache=None, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
"""Compute encoded features.
Args:
@@ -363,7 +363,7 @@
self.self_attn(
x,
mask,
- mask_shift_chunk=mask_shift_chunk,
+ mask_shfit_chunk=mask_shfit_chunk,
mask_att_chunk_encoder=mask_att_chunk_encoder,
),
),
@@ -379,7 +379,7 @@
self.self_attn(
x,
mask,
- mask_shift_chunk=mask_shift_chunk,
+ mask_shfit_chunk=mask_shfit_chunk,
mask_att_chunk_encoder=mask_att_chunk_encoder,
)
)
@@ -388,7 +388,7 @@
self.self_attn(
x,
mask,
- mask_shift_chunk=mask_shift_chunk,
+ mask_shfit_chunk=mask_shfit_chunk,
mask_att_chunk_encoder=mask_att_chunk_encoder,
)
)
@@ -402,7 +402,7 @@
if not self.normalize_before:
x = self.norm2(x)
- return x, mask, cache, mask_shift_chunk, mask_att_chunk_encoder
+ return x, mask, cache, mask_shfit_chunk, mask_att_chunk_encoder
def forward_chunk(self, x, cache=None, chunk_size=None, look_back=0):
"""Compute encoded features.
@@ -469,7 +469,7 @@
positionwise_conv_kernel_size: int = 1,
padding_idx: int = -1,
kernel_size: int = 11,
- sanm_shift: int = 0,
+ sanm_shfit: int = 0,
selfattention_layer_type: str = "sanm",
**kwargs,
):
@@ -494,7 +494,7 @@
output_size,
attention_dropout_rate,
kernel_size,
- sanm_shift,
+ sanm_shfit,
)
encoder_selfattn_layer_args = (
attention_heads,
@@ -502,7 +502,7 @@
output_size,
attention_dropout_rate,
kernel_size,
- sanm_shift,
+ sanm_shfit,
)
self.encoders0 = nn.ModuleList(
--
Gitblit v1.9.1