auto frontend
funasr/models/llm_asr/adaptor.py
@@ -84,7 +84,7 @@
        self.blocks = nn.ModuleList(
            [
                EncoderLayer(
                    output_size,
                    llm_dim,
                    MultiHeadedAttention(
                        kwargs.get("attention_heads", 8), llm_dim,
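For context, this hunk appears to touch the adaptor that maps encoder features into the LLM's embedding space, with the attention blocks sized to `llm_dim`. Below is a minimal, self-contained sketch of such an adaptor in plain PyTorch; it substitutes `nn.TransformerEncoderLayer` for FunASR's own `EncoderLayer`/`MultiHeadedAttention`, and everything beyond the names visible in the diff (`output_size`, `llm_dim`, `attention_heads`, the `kwargs.get` defaults) is an assumption, not FunASR's actual implementation.

```python
# Sketch of a Transformer adaptor projecting encoder output (output_size)
# into an LLM embedding space (llm_dim). Plain PyTorch stand-in for
# FunASR's EncoderLayer/MultiHeadedAttention; structure is assumed.
import torch
import torch.nn as nn


class TransformerAdaptor(nn.Module):
    def __init__(self, output_size: int = 512, llm_dim: int = 1024, **kwargs):
        super().__init__()
        # Project encoder features up to the LLM width first, so the
        # attention blocks can run at llm_dim (as the diff suggests).
        self.proj = nn.Linear(output_size, llm_dim)
        self.blocks = nn.ModuleList(
            [
                nn.TransformerEncoderLayer(
                    d_model=llm_dim,
                    nhead=kwargs.get("attention_heads", 8),
                    dim_feedforward=kwargs.get("linear_units", 2048),
                    batch_first=True,
                )
                # num_blocks default is assumed, not taken from the diff
                for _ in range(kwargs.get("num_blocks", 2))
            ]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, time, output_size) -> (batch, time, llm_dim)
        x = self.proj(x)
        for block in self.blocks:
            x = block(x)
        return x


if __name__ == "__main__":
    adaptor = TransformerAdaptor(output_size=512, llm_dim=1024, attention_heads=8)
    feats = torch.randn(2, 50, 512)
    print(adaptor(feats).shape)  # torch.Size([2, 50, 1024])
```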