| | |
| | | import os |
| | | import torch |
| | | import json |
| | | from io import BytesIO |
| | | import torch.distributed as dist |
| | | import numpy as np |
| | | import kaldiio |
| | |
| | | import time |
| | | import logging |
| | | from torch.nn.utils.rnn import pad_sequence |
| | | from pydub import AudioSegment |
| | | |
| | | try: |
| | | from funasr.download.file import download_from_url |
| | |
| | | for audio in data_or_path_or_list |
| | | ] |
| | | if isinstance(data_or_path_or_list, str) and data_or_path_or_list.startswith( |
| | | ("http://", "https://") |
| | | ("http://", "https://") |
| | | ): # download url to local file |
| | | data_or_path_or_list = download_from_url(data_or_path_or_list) |
| | | |
| | |
| | | elif isinstance(data_or_path_or_list, str) and data_type == "text" and tokenizer is not None: |
| | | data_or_path_or_list = tokenizer.encode(data_or_path_or_list) |
| | | elif isinstance(data_or_path_or_list, np.ndarray): # audio sample point |
| | | data_or_path_or_list = torch.from_numpy(data_or_path_or_list).squeeze() # [n_samples,] |
| | | data_or_path_or_list = torch.from_numpy(data_or_path_or_list) # .squeeze() # [n_samples,] |
| | | elif isinstance(data_or_path_or_list, str) and data_type == "kaldi_ark": |
| | | data_mat = kaldiio.load_mat(data_or_path_or_list) |
| | | if isinstance(data_mat, tuple): |
| | |
| | | |
| | | |
def load_bytes(input):
    """Decode raw 16-bit PCM bytes into a normalized float32 waveform.

    Args:
        input: raw audio bytes interpreted as little-endian int16 samples.

    Returns:
        np.ndarray of dtype float32 with values scaled into [-1.0, 1.0).

    Raises:
        TypeError: if the decoded buffer is not an integer array.
    """
    # input = validate_frame_rate(input)
    middle_data = np.frombuffer(input, dtype=np.int16)
    middle_data = np.asarray(middle_data)
    if middle_data.dtype.kind not in "iu":
        raise TypeError("'middle_data' must be an array of integers")
    dtype = np.dtype("float32")
    if dtype.kind != "f":
        raise TypeError("'dtype' must be a floating point type")

    # Map the full signed-integer range onto [-1, 1): subtract the midpoint
    # of the representable range, then divide by half the range width.
    i = np.iinfo(middle_data.dtype)
    abs_max = 2 ** (i.bits - 1)
    offset = i.min + abs_max
    array = (middle_data.astype(dtype) - offset) / abs_max
    return array
| | | |
| | | |
def validate_frame_rate(
    input,
    fs: int = 16000,
):
    """Ensure audio bytes are sampled at `fs` Hz, returning WAV-encoded bytes.

    Args:
        input: encoded audio as raw bytes (any format pydub can read).
        fs: target sample rate in Hz; defaults to 16000.

    Returns:
        WAV-format bytes of the audio, resampled to `fs` if needed.
    """
    # Decode the audio from an in-memory byte stream via pydub.
    audio = AudioSegment.from_file(BytesIO(input))

    # Resample only when the source rate differs from the target.
    if audio.frame_rate != fs:
        audio = audio.set_frame_rate(fs)

    # Re-encode as WAV into a fresh in-memory buffer and hand back the bytes.
    wav_buffer = BytesIO()
    audio.export(wav_buffer, format="wav")
    wav_buffer.seek(0)
    return wav_buffer.read()
| | | |
| | | |
| | | def extract_fbank(data, data_len=None, data_type: str = "sound", frontend=None, **kwargs): |
| | | if isinstance(data, np.ndarray): |
| | | data = torch.from_numpy(data) |