From abf5af40e934216b397c5331e0a68dc92f0a4f4e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期二, 26 三月 2024 15:07:45 +0800
Subject: [PATCH] update

---
 funasr/utils/load_utils.py |   15 +++++++++++----
 funasr/__init__.py         |    4 +++-
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/funasr/__init__.py b/funasr/__init__.py
index a5011bf..950c18e 100644
--- a/funasr/__init__.py
+++ b/funasr/__init__.py
@@ -31,4 +31,6 @@
 import_submodules(__name__)
 
 from funasr.auto.auto_model import AutoModel
-from funasr.auto.auto_frontend import AutoFrontend
\ No newline at end of file
+from funasr.auto.auto_frontend import AutoFrontend
+
+os.environ["HYDRA_FULL_ERROR"] = "1"
\ No newline at end of file
diff --git a/funasr/utils/load_utils.py b/funasr/utils/load_utils.py
index 6f12f55..8ff7115 100644
--- a/funasr/utils/load_utils.py
+++ b/funasr/utils/load_utils.py
@@ -51,13 +51,20 @@
 
     if isinstance(data_or_path_or_list, str) and os.path.exists(data_or_path_or_list): # local file
         if data_type is None or data_type == "sound":
-            if use_ffmpeg:
-                data_or_path_or_list = _load_audio_ffmpeg(data_or_path_or_list, sr=fs)
-                data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze()  # [n_samples,]
-            else:
+            # if use_ffmpeg:
+            #     data_or_path_or_list = _load_audio_ffmpeg(data_or_path_or_list, sr=fs)
+            #     data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze()  # [n_samples,]
+            # else:
+            #     data_or_path_or_list, audio_fs = torchaudio.load(data_or_path_or_list)
+            #     if kwargs.get("reduce_channels", True):
+            #         data_or_path_or_list = data_or_path_or_list.mean(0)
+            try:
                 data_or_path_or_list, audio_fs = torchaudio.load(data_or_path_or_list)
                 if kwargs.get("reduce_channels", True):
                     data_or_path_or_list = data_or_path_or_list.mean(0)
+            except Exception:
+                data_or_path_or_list = _load_audio_ffmpeg(data_or_path_or_list, sr=fs)
+                data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze()  # [n_samples,]
         elif data_type == "text" and tokenizer is not None:
             data_or_path_or_list = tokenizer.encode(data_or_path_or_list)
         elif data_type == "image": # undo

--
Gitblit v1.9.1