wanchen.swc
2023-03-16 6ca0d1f54c8b698e3315edab8aa9ba7227d7c9e7
funasr/export/export_model.py
@@ -76,7 +76,8 @@
                # Real audio was supplied: derive feature tensors (and their
                # lengths) from it and run the model on each utterance.
                feats, feats_len = self.load_feats(self.audio_in)
                # NOTE(review): the loop variable `len` shadows the builtin
                # len() for the rest of this scope — rename (e.g. `feat_len`)
                # if this block is ever modified.
                for i, (feat, len) in enumerate(zip(feats, feats_len)):
                    print("debug, iter: {}".format(i))
                    # NOTE(review): the model is called twice per item — once
                    # with autograd enabled and once under no_grad(). This
                    # looks like a warm-up/trace pass before export; confirm
                    # the first (grad-enabled) call is intentional and not an
                    # accidental duplicate.
                    m(feat, len)
                    with torch.no_grad():
                        m(feat, len)
            else:
                # No audio input available: exercise the model with synthetic
                # dummy inputs provided by the model itself (typical for
                # shape-tracing during export).
                dummy_input = model.get_dummy_inputs()
                m(*dummy_input)