雾聪
2024-03-14 3549c0106e5a35ef2ddffdfd7381e613ed5310bd
funasr/models/paraformer/model.py
@@ -21,7 +21,7 @@
 from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
 from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
 from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
+from funasr.train_utils.device_funcs import to_device
 
 @tables.register("model_classes", "Paraformer")
 class Paraformer(torch.nn.Module):
@@ -554,37 +554,35 @@
         max_seq_len=512,
         **kwargs,
     ):
-        onnx = kwargs.get("onnx", True)
+        self.device = kwargs.get("device")
+        is_onnx = kwargs.get("type", "onnx") == "onnx"
         encoder_class = tables.encoder_classes.get(kwargs["encoder"]+"Export")
-        self.encoder = encoder_class(self.encoder, onnx=onnx)
+        self.encoder = encoder_class(self.encoder, onnx=is_onnx)
 
         predictor_class = tables.predictor_classes.get(kwargs["predictor"]+"Export")
-        self.predictor = predictor_class(self.predictor, onnx=onnx)
+        self.predictor = predictor_class(self.predictor, onnx=is_onnx)
 
         decoder_class = tables.decoder_classes.get(kwargs["decoder"]+"Export")
-        self.decoder = decoder_class(self.decoder, onnx=onnx)
+        self.decoder = decoder_class(self.decoder, onnx=is_onnx)
 
-        from funasr.utils.torch_function import MakePadMask
+        from funasr.utils.torch_function import sequence_mask
-        if onnx:
-            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
-        else:
-            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
-        self.forward = self._export_forward
+        self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+        self.forward = self.export_forward
 
         return self
 
-    def _export_forward(
+    def export_forward(
         self,
         speech: torch.Tensor,
         speech_lengths: torch.Tensor,
     ):
         # a. To device
         batch = {"speech": speech, "speech_lengths": speech_lengths}
-        # batch = to_device(batch, device=self.device)
+        batch = to_device(batch, device=self.device)
 
         enc, enc_len = self.encoder(**batch)
         mask = self.make_pad_mask(enc_len)[:, None, :]
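
After this hunk, export() always builds the pad mask with sequence_mask, publishes the traced entry point under the public name export_forward, and actually moves the input batch onto self.device instead of leaving the to_device call commented out. Below is a minimal sketch of how the rebound forward might then be traced to ONNX; the wrapper name, dummy shapes (560-dim LFR fbank features, as in Paraformer-large), and dynamic axes are illustrative assumptions, not part of this commit:

import torch

def trace_paraformer(model, out_path="paraformer.onnx"):
    # Assumes model.export(**kwargs) has already run, so model.forward
    # is bound to export_forward. Dummy shapes are assumptions; match
    # them to the actual frontend configuration.
    speech = torch.randn(1, 100, 560)
    speech_lengths = torch.tensor([100], dtype=torch.int32)
    # export_forward calls to_device(batch, device=self.device), so the
    # CPU dummies above are fine even for a GPU-resident model.
    torch.onnx.export(
        model,
        (speech, speech_lengths),
        out_path,
        input_names=["speech", "speech_lengths"],
        dynamic_axes={"speech": {0: "batch", 1: "frames"},
                      "speech_lengths": {0: "batch"}},
    )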