From d783b24ba7d8a03dabfa2139fcbf40c216e0ea3d Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 16 Mar 2023 19:34:52 +0800
Subject: [PATCH] Merge pull request #199 from alibaba-damo-academy/dev_xw

---
 funasr/export/export_model.py |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index 7e65a8f..9a1ef96 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -74,8 +74,9 @@
             # using dummy inputs for a example
             if self.audio_in is not None:
                 feats, feats_len = self.load_feats(self.audio_in)
-                for feat, len in zip(feats, feats_len):
-                    m(feat, len)
+                for i, (feat, len) in enumerate(zip(feats, feats_len)):
+                    with torch.no_grad():
+                        m(feat, len)
             else:
                 dummy_input = model.get_dummy_inputs()
                 m(*dummy_input)
@@ -148,7 +149,7 @@
         feats = []
         feats_len = []
         for line in wav_list:
-            name, path = line.strip().split()
+            path = line.strip()
             waveform, sampling_rate = torchaudio.load(path)
             if sampling_rate != self.frontend.fs:
                 waveform = torchaudio.transforms.Resample(orig_freq=sampling_rate,
@@ -184,6 +185,7 @@
         model, asr_train_args = ASRTask.build_model_from_file(
             asr_train_config, asr_model_file, cmvn_file, 'cpu'
         )
+        self.frontend = model.frontend
         self._export(model, tag_name)
             
 

--
Gitblit v1.9.1