funasr/models/ct_transformer_streaming/encoder.py
@@ -416,13 +416,10 @@
         self.model = model
         self._output_size = model._output_size

         from funasr.utils.torch_function import MakePadMask
         from funasr.utils.torch_function import sequence_mask

-        if onnx:
-            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
-        else:
-            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+        self.make_pad_mask = sequence_mask(max_seq_len, flip=False)

         from funasr.models.sanm.attention import MultiHeadedAttentionSANMExport
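For context, a minimal sketch of what a sequence/padding mask of this kind computes. The class name, constructor signature, and exact mask semantics below are assumptions inferred only from the `sequence_mask(max_seq_len, flip=False)` call in the diff, not taken from the FunASR implementation:

```python
# Sketch (assumption): a padding-mask module analogous in spirit to
# funasr.utils.torch_function.sequence_mask; the real module may differ.
import torch


class SequenceMaskSketch(torch.nn.Module):
    def __init__(self, max_seq_len: int = 512, flip: bool = False):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.flip = flip

    def forward(self, lengths: torch.Tensor) -> torch.Tensor:
        # lengths: (batch,) valid frame counts -> (batch, max_seq_len) boolean mask
        positions = torch.arange(self.max_seq_len, device=lengths.device)
        mask = positions.unsqueeze(0) < lengths.unsqueeze(1)  # True at valid steps
        return ~mask if self.flip else mask


# Example: SequenceMaskSketch(max_seq_len=6)(torch.tensor([2, 4]))
# -> [[True, True, False, False, False, False],
#     [True, True, True,  True,  False, False]]
```

Under this reading, dropping the `if onnx:` branch simply means the same mask construction is used for both export and non-export paths.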