From 19f4fae784210e85421ae2f8dcd0fbbd1eb2ad3e Mon Sep 17 00:00:00 2001
From: lingyunfly <121302812+lingyunfly@users.noreply.github.com>
Date: Thu, 18 May 2023 14:15:27 +0800
Subject: [PATCH] Update self_attention_encoder.py

---
 funasr/models/encoder/opennmt_encoders/self_attention_encoder.py |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py b/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
index 443b37a..db30f08 100644
--- a/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
+++ b/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
@@ -117,7 +117,7 @@
 
 class SelfAttentionEncoder(AbsEncoder):
     """
-    author: Speech Lab, Alibaba Group, China
+    Author: Speech Lab of DAMO Academy, Alibaba Group
     Self attention encoder in OpenNMT framework
     """
 
@@ -272,7 +272,7 @@
             position embedded tensor and mask
         """
         masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
-        xs_pad *= self.output_size()**0.5
+        xs_pad = xs_pad * self.output_size()**0.5
         if self.embed is None:
             xs_pad = xs_pad
         elif (

--
Gitblit v1.9.1