funasr/models/llm_asr/adaptor.py
@@ -83,6 +83,8 @@
         from funasr.models.transformer.attention import MultiHeadedAttention
         from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForward

+        self.blocks = None
+        if kwargs.get("n_layer", 2) > 0:
             self.blocks = nn.ModuleList(
                 [
                     EncoderLayer(
@@ -123,6 +125,7 @@
         olens = None
         olens = (ilens - 1) // self.k + 1
         masks = (~make_pad_mask(olens)[:, None, :]).to(x.device)
+        if self.blocks is not None:
             for layer, block in enumerate(self.blocks):
                 x, masks = block(x, masks)
         return x, olens
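For context, here is a minimal, self-contained sketch of the pattern the two hunks introduce: build the transformer blocks only when n_layer > 0, and skip the block loop in forward() when self.blocks is None. This is not the FunASR class itself; the class name DownsampleAdaptorSketch is hypothetical, the EncoderLayer blocks are replaced by plain nn.Linear stand-ins, and the mask handling (make_pad_mask) is omitted for brevity. The downsampling and output-length computation mirror the snippet above.

    # Illustrative sketch only; assumes PyTorch is available.
    import torch
    import torch.nn as nn


    class DownsampleAdaptorSketch(nn.Module):
        def __init__(self, encoder_dim=4, llm_dim=8, ffn_dim=16, downsample_rate=2, n_layer=0):
            super().__init__()
            self.k = downsample_rate
            self.linear1 = nn.Linear(encoder_dim * self.k, ffn_dim)
            self.relu = nn.ReLU()
            self.linear2 = nn.Linear(ffn_dim, llm_dim)
            # Mirrors the first hunk: only allocate blocks when n_layer > 0.
            self.blocks = None
            if n_layer > 0:
                # Stand-in for the EncoderLayer stack in the real adaptor.
                self.blocks = nn.ModuleList([nn.Linear(llm_dim, llm_dim) for _ in range(n_layer)])

        def forward(self, x, ilens=None):
            b, t, d = x.size()
            # Concatenate every k frames, then project to the LLM dimension.
            x = x[:, : t - t % self.k, :].reshape(b, -1, d * self.k)
            x = self.linear2(self.relu(self.linear1(x)))
            olens = None
            if ilens is not None:
                olens = (ilens - 1) // self.k + 1
            # Mirrors the second hunk: run the loop only if blocks were built.
            if self.blocks is not None:
                for block in self.blocks:
                    x = block(x)
            return x, olens


    if __name__ == "__main__":
        adaptor = DownsampleAdaptorSketch(n_layer=0)  # n_layer=0 -> blocks is None
        feats = torch.randn(2, 10, 4)
        out, olens = adaptor(feats, ilens=torch.tensor([10, 7]))
        print(out.shape, olens)  # torch.Size([2, 5, 8]) tensor([5, 4])

The point of guarding on self.blocks is not None is that setting n_layer=0 turns the adaptor into a pure frame-stacking linear projector, without allocating an empty ModuleList or iterating over it in forward().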