From 0ac06c029edb57e2dcacd64da2a05869a2f7364d Mon Sep 17 00:00:00 2001
From: 志浩 <neo.dzh@alibaba-inc.com>
Date: Thu, 16 Mar 2023 19:24:15 +0800
Subject: [PATCH] fix bug: allow path_name_type_list to be a list of lists, e.g. [[any, str, str], [any, str, str]]

---
 funasr/datasets/iterable_dataset.py |   46 ++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/funasr/datasets/iterable_dataset.py b/funasr/datasets/iterable_dataset.py
index 1fc9270..3798280 100644
--- a/funasr/datasets/iterable_dataset.py
+++ b/funasr/datasets/iterable_dataset.py
@@ -8,10 +8,10 @@
 from typing import Iterator
 from typing import Tuple
 from typing import Union
+from typing import List
 
 import kaldiio
 import numpy as np
-import soundfile
 import torch
 import torchaudio
 from torch.utils.data.dataset import IterableDataset
@@ -21,7 +21,7 @@
 from funasr.datasets.dataset import ESPnetDataset
 
 
-SUPPORT_AUDIO_TYPE_SETS = ['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma']
+SUPPORT_AUDIO_TYPE_SETS = ['flac', 'mp3', 'ogg', 'opus', 'wav', 'pcm']
 
 def load_kaldi(input):
     retval = kaldiio.load_mat(input)
@@ -61,9 +61,14 @@
     array = np.frombuffer((middle_data.astype(dtype) - offset) / abs_max, dtype=np.float32)
     return array
 
+def load_pcm(input):
+    with open(input,"rb") as f:
+        bytes = f.read()
+    return load_bytes(bytes)
 
 DATA_TYPES = {
     "sound": lambda x: torchaudio.load(x)[0][0].numpy(),
+    "pcm": load_pcm,
     "kaldi_ark": load_kaldi,
     "bytes": load_bytes,
     "waveform": lambda x: x,
@@ -101,6 +106,7 @@
                 [str, Dict[str, np.ndarray]], Dict[str, np.ndarray]
             ] = None,
             float_dtype: str = "float32",
+            fs: dict = None,
             int_dtype: str = "long",
             key_file: str = None,
     ):
@@ -116,12 +122,13 @@
         self.float_dtype = float_dtype
         self.int_dtype = int_dtype
         self.key_file = key_file
+        self.fs = fs
 
         self.debug_info = {}
         non_iterable_list = []
         self.path_name_type_list = []
 
-        if not isinstance(path_name_type_list[0], Tuple):
+        if not isinstance(path_name_type_list[0], (Tuple, List)):
             path = path_name_type_list[0]
             name = path_name_type_list[1]
             _type = path_name_type_list[2]
@@ -175,6 +182,15 @@
             _type = self.path_name_type_list[0][2]
             func = DATA_TYPES[_type]
             array = func(value)
+            if self.fs is not None and name == "speech":
+                audio_fs = self.fs["audio_fs"]
+                model_fs = self.fs["model_fs"]
+                if audio_fs is not None and model_fs is not None:
+                    array = torch.from_numpy(array)
+                    array = array.unsqueeze(0)
+                    array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                   new_freq=model_fs)(array)
+                    array = array.squeeze(0).numpy()
             data[name] = array
 
             if self.preprocess is not None:
@@ -209,8 +225,20 @@
                 if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
                     raise NotImplementedError(
                         f'Not supported audio type: {audio_type}')
+                if audio_type == "pcm":
+                    _type = "pcm"
+
             func = DATA_TYPES[_type]
             array = func(value)
+            if self.fs is not None and name == "speech":
+                audio_fs = self.fs["audio_fs"]
+                model_fs = self.fs["model_fs"]
+                if audio_fs is not None and model_fs is not None:
+                    array = torch.from_numpy(array)
+                    array = array.unsqueeze(0)
+                    array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                           new_freq=model_fs)(array)
+                    array = array.squeeze(0).numpy()
             data[name] = array
 
             if self.preprocess is not None:
@@ -299,9 +327,20 @@
                         if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
                             raise NotImplementedError(
                                 f'Not supported audio type: {audio_type}')
+                        if audio_type == "pcm":
+                            _type = "pcm"
                     func = DATA_TYPES[_type]
                     # Load entry
                     array = func(value)
+                    if self.fs is not None and name == "speech":
+                        audio_fs = self.fs["audio_fs"]
+                        model_fs = self.fs["model_fs"]
+                        if audio_fs is not None and model_fs is not None:
+                            array = torch.from_numpy(array)
+                            array = array.unsqueeze(0)
+                            array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                                   new_freq=model_fs)(array)
+                            array = array.squeeze(0).numpy()
                     data[name] = array
                 if self.non_iterable_dataset is not None:
                     # 2.b. Load data from non-iterable dataset
@@ -335,4 +374,3 @@
 
         if count == 0:
             raise RuntimeError("No iteration")
-

--
Gitblit v1.9.1