From 22b928dd3ff37ccee57ab2b5c2e4fcda4d33d24d Mon Sep 17 00:00:00 2001
From: Shi Xian <40013335+R1ckShi@users.noreply.github.com>
Date: 星期四, 05 十二月 2024 19:30:30 +0800
Subject: [PATCH] Merge pull request #2269 from modelscope/dev_sx2

---
 funasr/utils/load_utils.py |   33 +++++++++++++++++++++++++++++++--
 1 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/funasr/utils/load_utils.py b/funasr/utils/load_utils.py
index faafc0f..9613d1b 100644
--- a/funasr/utils/load_utils.py
+++ b/funasr/utils/load_utils.py
@@ -1,6 +1,7 @@
 import os
 import torch
 import json
+from io import BytesIO
 import torch.distributed as dist
 import numpy as np
 import kaldiio
@@ -9,6 +10,7 @@
 import time
 import logging
 from torch.nn.utils.rnn import pad_sequence
+from pydub import AudioSegment
 
 try:
     from funasr.download.file import download_from_url
@@ -76,7 +78,7 @@
                 for audio in data_or_path_or_list
             ]
     if isinstance(data_or_path_or_list, str) and data_or_path_or_list.startswith(
-            ("http://", "https://")
+        ("http://", "https://")
     ):  # download url to local file
         data_or_path_or_list = download_from_url(data_or_path_or_list)
 
@@ -112,7 +114,7 @@
     elif isinstance(data_or_path_or_list, str) and data_type == "text" and tokenizer is not None:
         data_or_path_or_list = tokenizer.encode(data_or_path_or_list)
     elif isinstance(data_or_path_or_list, np.ndarray):  # audio sample point
-        data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze()  # [n_samples,]
+        data_or_path_or_list = torch.from_numpy(data_or_path_or_list)  # .squeeze()  # [n_samples,]
     elif isinstance(data_or_path_or_list, str) and data_type == "kaldi_ark":
         data_mat = kaldiio.load_mat(data_or_path_or_list)
         if isinstance(data_mat, tuple):
@@ -136,6 +138,7 @@
 
 
 def load_bytes(input):
+    # input = validate_frame_rate(input)
     middle_data = np.frombuffer(input, dtype=np.int16)
     middle_data = np.asarray(middle_data)
     if middle_data.dtype.kind not in "iu":
@@ -151,6 +154,32 @@
     return array
 
 
+def validate_frame_rate(
+    input,
+    fs: int = 16000,
+):
+
+    # Wrap the raw input bytes in an in-memory byte stream
+    byte_data = BytesIO(input)
+
+    # Load the audio with pydub (container/codec auto-detected)
+    audio = AudioSegment.from_file(byte_data)
+
+    # 纭繚閲囨牱鐜囦负 16000 Hz
+    if audio.frame_rate != fs:
+        audio = audio.set_frame_rate(fs)
+
+        # Export the resampled audio as a WAV byte stream
+        output = BytesIO()
+        audio.export(output, format="wav")
+        output.seek(0)
+
+        # Read back the resampled byte data
+        input = output.read()
+
+    return input
+
+
 def extract_fbank(data, data_len=None, data_type: str = "sound", frontend=None, **kwargs):
     if isinstance(data, np.ndarray):
         data = torch.from_numpy(data)

--
Gitblit v1.9.1