From d393848a695622964ac1b5a7817fd4aee6ce99d8 Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Mon, 26 Jun 2023 17:26:52 +0800
Subject: [PATCH] Merge pull request #669 from alibaba-damo-academy/dev_lhn
---
funasr/utils/wav_utils.py | 13 +++++++++++--
1 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/funasr/utils/wav_utils.py b/funasr/utils/wav_utils.py
index ebb80d2..a6e394f 100644
--- a/funasr/utils/wav_utils.py
+++ b/funasr/utils/wav_utils.py
@@ -11,6 +11,7 @@
import numpy as np
import torch
import torchaudio
+import soundfile
import torchaudio.compliance.kaldi as kaldi
@@ -162,7 +163,11 @@
waveform = torch.from_numpy(waveform.reshape(1, -1))
else:
# load pcm from wav, and resample
- waveform, audio_sr = torchaudio.load(wav_file)
+ try:
+ waveform, audio_sr = torchaudio.load(wav_file)
+ except Exception:
+ waveform, audio_sr = soundfile.read(wav_file)
+ waveform = torch.tensor(np.expand_dims(waveform, axis=0))
waveform = waveform * (1 << 15)
waveform = torch_resample(waveform, audio_sr, model_sr)
@@ -181,7 +186,11 @@
def wav2num_frame(wav_path, frontend_conf):
- waveform, sampling_rate = torchaudio.load(wav_path)
+ try:
+ waveform, sampling_rate = torchaudio.load(wav_path)
+ except Exception:
+ waveform, sampling_rate = soundfile.read(wav_path)
+ waveform = torch.tensor(np.expand_dims(waveform, axis=0))
speech_length = (waveform.shape[1] / sampling_rate) * 1000.
n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])
feature_dim = frontend_conf["n_mels"] * frontend_conf["lfr_m"]
--
Gitblit v1.9.1