funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
@@ -272,7 +272,7 @@
             position embedded tensor and mask
         """
         masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
-        xs_pad *= self.output_size()**0.5
+        xs_pad = xs_pad * self.output_size()**0.5
         if self.embed is None:
             xs_pad = xs_pad
         elif (
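The hunk swaps the in-place `*=` scaling for an out-of-place multiplication. A minimal sketch of the behavioral difference, assuming the intent is to avoid mutating the caller's `xs_pad` tensor in place (the diff itself does not state the motivation); the function names and the scale value below are illustrative only, not from the PR:

import torch

def scale_in_place(xs_pad: torch.Tensor, scale: float) -> torch.Tensor:
    xs_pad *= scale          # rewrites the caller's tensor in place
    return xs_pad

def scale_out_of_place(xs_pad: torch.Tensor, scale: float) -> torch.Tensor:
    xs_pad = xs_pad * scale  # allocates a new tensor; the caller's data is untouched
    return xs_pad

x = torch.ones(2, 3)
scale_out_of_place(x, 16.0)
print(x[0, 0].item())  # 1.0 -- input unchanged
scale_in_place(x, 16.0)
print(x[0, 0].item())  # 16.0 -- input was modified in place

The out-of-place form also sidesteps the RuntimeError PyTorch raises when an in-place op is applied to a tensor that autograd still needs, such as a leaf tensor with requires_grad=True, which may be a further reason for the change.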