funasr/export/export_model.py
# Dry-run the module once before export: using dummy inputs for an example.
# If real audio was supplied, trace with actual features; otherwise fall back
# to the model's synthetic dummy inputs.
if self.audio_in is not None:
    feats, feats_len = self.load_feats(self.audio_in)
    # NOTE(review): loop variable renamed from `len` — it shadowed the builtin.
    # The unused enumerate() index was dropped as well.
    for feat, feat_len in zip(feats, feats_len):
        # no_grad: this forward pass is only for tracing/shape checking,
        # so skip autograd bookkeeping.
        with torch.no_grad():
            m(feat, feat_len)
else:
    dummy_input = model.get_dummy_inputs()
    m(*dummy_input)