From 3df109adfccedeb134dea4ba2ea9a2da89872048 Mon Sep 17 00:00:00 2001
From: Isuxiz Slidder <48672727+Isuxiz@users.noreply.github.com>
Date: Mon, 31 Mar 2025 17:51:52 +0800
Subject: [PATCH] Update model.py to fix "IndexError: index 1 is out of bounds for dimension 1 with size 0" (#2454)

---
 funasr/utils/load_utils.py |   52 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 48 insertions(+), 4 deletions(-)

diff --git a/funasr/utils/load_utils.py b/funasr/utils/load_utils.py
index faafc0f..c38f1d9 100644
--- a/funasr/utils/load_utils.py
+++ b/funasr/utils/load_utils.py
@@ -1,6 +1,7 @@
 import os
 import torch
 import json
+from io import BytesIO
 import torch.distributed as dist
 import numpy as np
 import kaldiio
@@ -17,6 +18,11 @@
 import pdb
 import subprocess
 from subprocess import CalledProcessError, run
+
+try:
+    from pydub import AudioSegment
+except:
+    pass
 
 
 def is_ffmpeg_installed():
@@ -76,12 +82,14 @@
                 for audio in data_or_path_or_list
             ]
     if isinstance(data_or_path_or_list, str) and data_or_path_or_list.startswith(
-            ("http://", "https://")
+        ("http://", "https://")
     ):  # download url to local file
         data_or_path_or_list = download_from_url(data_or_path_or_list)
 
-    if isinstance(data_or_path_or_list, str) and os.path.exists(data_or_path_or_list):  # local file
+    if (isinstance(data_or_path_or_list, str) and os.path.exists(data_or_path_or_list)) or hasattr(data_or_path_or_list, 'read'):  # local file or bytes io
         if data_type is None or data_type == "sound":
+            if hasattr(data_or_path_or_list, "read") and hasattr(data_or_path_or_list, "seek"):
+                data_or_path_or_list.seek(0)
             # if use_ffmpeg:
             #     data_or_path_or_list = _load_audio_ffmpeg(data_or_path_or_list, sr=fs)
             #     data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze()  # [n_samples,]
@@ -99,7 +107,8 @@
                     data_or_path_or_list
                 ).squeeze()  # [n_samples,]
         elif data_type == "text" and tokenizer is not None:
-            data_or_path_or_list = tokenizer.encode(data_or_path_or_list)
+            with open(data_or_path_or_list, "r") as f:
+                data_or_path_or_list = tokenizer.encode(f.read().strip())
         elif data_type == "image":  # undo
             pass
         elif data_type == "video":  # undo
@@ -112,7 +121,7 @@
     elif isinstance(data_or_path_or_list, str) and data_type == "text" and tokenizer is not None:
         data_or_path_or_list = tokenizer.encode(data_or_path_or_list)
     elif isinstance(data_or_path_or_list, np.ndarray):  # audio sample point
-        data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze()  # [n_samples,]
+        data_or_path_or_list = torch.from_numpy(data_or_path_or_list)  # .squeeze()  # [n_samples,]
     elif isinstance(data_or_path_or_list, str) and data_type == "kaldi_ark":
         data_mat = kaldiio.load_mat(data_or_path_or_list)
         if isinstance(data_mat, tuple):
@@ -136,6 +145,10 @@
 
 
 def load_bytes(input):
+    try:
+        input = validate_frame_rate(input)
+    except:
+        pass
     middle_data = np.frombuffer(input, dtype=np.int16)
     middle_data = np.asarray(middle_data)
     if middle_data.dtype.kind not in "iu":
@@ -151,6 +164,37 @@
     return array
 
 
+def validate_frame_rate(
+    input,
+    fs: int = 16000,
+):
+
+    # Read the raw input bytes as an in-memory byte stream
+    byte_data = BytesIO(input)
+
+    # Use pydub to load the audio
+    try:
+        audio = AudioSegment.from_file(byte_data)
+    except:
+        raise RuntimeError(
+            "You are decoding the pcm data, please install pydub first. via `pip install pydub`."
+        )
+
+    # 纭繚閲囨牱鐜囦负 16000 Hz
+    if audio.frame_rate != fs:
+        audio = audio.set_frame_rate(fs)
+
+        # Export the resampled audio back out as a byte stream
+        output = BytesIO()
+        audio.export(output, format="wav")
+        output.seek(0)
+
+        # Get the resampled byte-stream data
+        input = output.read()
+
+    return input
+
+
 def extract_fbank(data, data_len=None, data_type: str = "sound", frontend=None, **kwargs):
     if isinstance(data, np.ndarray):
         data = torch.from_numpy(data)

--
Gitblit v1.9.1