From 7da5b31e25845905b814dfa6282ebf09ada329d5 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期二, 27 六月 2023 16:57:43 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR add

---
 funasr/utils/wav_utils.py |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/funasr/utils/wav_utils.py b/funasr/utils/wav_utils.py
index a6e394f..bd067c2 100644
--- a/funasr/utils/wav_utils.py
+++ b/funasr/utils/wav_utils.py
@@ -166,7 +166,9 @@
         try:
             waveform, audio_sr = torchaudio.load(wav_file)
         except:
-            waveform, audio_sr = soundfile.read(wav_file)
+            waveform, audio_sr = soundfile.read(wav_file, dtype='float32')
+            if waveform.ndim == 2:
+                waveform = waveform[:, 0]
             waveform = torch.tensor(np.expand_dims(waveform, axis=0))
         waveform = waveform * (1 << 15)
         waveform = torch_resample(waveform, audio_sr, model_sr)
@@ -187,9 +189,9 @@
 
 def wav2num_frame(wav_path, frontend_conf):
     try:
-        waveform, audio_sr = torchaudio.load(wav_file)
+        waveform, sampling_rate = torchaudio.load(wav_path)
     except:
-        waveform, audio_sr = soundfile.read(wav_file)
+        waveform, sampling_rate = soundfile.read(wav_path)
         waveform = torch.tensor(np.expand_dims(waveform, axis=0))
     speech_length = (waveform.shape[1] / sampling_rate) * 1000.
     n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])

--
Gitblit v1.9.1