游雁
2023-03-17 2d1d47b1618c1cc89fe19c3ea8ea46fc848c2d00
funasr/export/export_model.py
@@ -74,8 +74,9 @@
            # Warm up / trace the wrapped export module `m` with example inputs.
            # NOTE(review): this span looks like a diff hunk with the +/- markers
            # stripped — the first loop (no grad guard) and the second loop
            # (torch.no_grad) appear to be the removed/added pair of the same
            # change. Confirm against the full file that only ONE of them survives.
            if self.audio_in is not None:
                # Real audio was supplied: derive features from it.
                # NOTE(review): assumes load_feats returns parallel sequences of
                # feature tensors and their lengths — confirm against its definition.
                feats, feats_len = self.load_feats(self.audio_in)
                # NOTE(review): the loop variable `len` shadows the builtin len();
                # prefer a name like `feat_len` when this is next touched.
                for feat, len in zip(feats, feats_len):
                    m(feat, len)
                # Same pass, but without building the autograd graph — avoids
                # needless gradient bookkeeping/memory during export.
                for i, (feat, len) in enumerate(zip(feats, feats_len)):
                    with torch.no_grad():
                        m(feat, len)
            else:
                # No audio provided: fall back to model-supplied dummy inputs
                # (shape/content defined by the model's own get_dummy_inputs).
                dummy_input = model.get_dummy_inputs()
                m(*dummy_input)