From 49e8e9d8fc1209c347aa2c2c65c6eb067b9f79d4 Mon Sep 17 00:00:00 2001
From: zhu-gu-an <76513567+zhu-gu-an@users.noreply.github.com>
Date: Sat, 13 Jan 2024 13:54:00 +0800
Subject: [PATCH] add triton paraformer large online (#1242)
---
funasr/utils/wav_utils.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/funasr/utils/wav_utils.py b/funasr/utils/wav_utils.py
index a6e394f..8c2dc68 100644
--- a/funasr/utils/wav_utils.py
+++ b/funasr/utils/wav_utils.py
@@ -11,7 +11,7 @@
import numpy as np
import torch
import torchaudio
-import soundfile
+import librosa
import torchaudio.compliance.kaldi as kaldi
@@ -166,7 +166,9 @@
try:
waveform, audio_sr = torchaudio.load(wav_file)
except:
- waveform, audio_sr = soundfile.read(wav_file)
+ waveform, audio_sr = librosa.load(wav_file, dtype='float32')
+ if waveform.ndim == 2:
+ waveform = waveform[:, 0]
waveform = torch.tensor(np.expand_dims(waveform, axis=0))
waveform = waveform * (1 << 15)
waveform = torch_resample(waveform, audio_sr, model_sr)
@@ -187,9 +189,9 @@
def wav2num_frame(wav_path, frontend_conf):
try:
- waveform, audio_sr = torchaudio.load(wav_file)
+ waveform, sampling_rate = torchaudio.load(wav_path)
except:
- waveform, audio_sr = soundfile.read(wav_file)
+ waveform, sampling_rate = librosa.load(wav_path)
waveform = torch.tensor(np.expand_dims(waveform, axis=0))
speech_length = (waveform.shape[1] / sampling_rate) * 1000.
n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])
--
Gitblit v1.9.1