From 2868fe3df4e92a6ae3e327faf6e57ea492e04124 Mon Sep 17 00:00:00 2001
From: 志浩 <neo.dzh@alibaba-inc.com>
Date: 星期四, 16 三月 2023 19:24:21 +0800
Subject: [PATCH] Merge branch 'main' into dev_dzh

---
 funasr/datasets/iterable_dataset.py |   35 +++++++++++++++++++++++++----------
 1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/funasr/datasets/iterable_dataset.py b/funasr/datasets/iterable_dataset.py
index 2001df9..c8c51d4 100644
--- a/funasr/datasets/iterable_dataset.py
+++ b/funasr/datasets/iterable_dataset.py
@@ -8,6 +8,7 @@
 from typing import Iterator
 from typing import Tuple
 from typing import Union
+from typing import List
 
 import kaldiio
 import numpy as np
@@ -66,7 +67,7 @@
     return load_bytes(bytes)
 
 DATA_TYPES = {
-    "sound": lambda x: torchaudio.load(x)[0][0].numpy(),
+    "sound": lambda x: torchaudio.load(x)[0].numpy(),
     "pcm": load_pcm,
     "kaldi_ark": load_kaldi,
     "bytes": load_bytes,
@@ -106,6 +107,7 @@
             ] = None,
             float_dtype: str = "float32",
             fs: dict = None,
+            mc: bool = False,
             int_dtype: str = "long",
             key_file: str = None,
     ):
@@ -122,12 +124,13 @@
         self.int_dtype = int_dtype
         self.key_file = key_file
         self.fs = fs
+        self.mc = mc
 
         self.debug_info = {}
         non_iterable_list = []
         self.path_name_type_list = []
 
-        if not isinstance(path_name_type_list[0], Tuple):
+        if not isinstance(path_name_type_list[0], (Tuple, List)):
             path = path_name_type_list[0]
             name = path_name_type_list[1]
             _type = path_name_type_list[2]
@@ -192,6 +195,7 @@
                         array = torchaudio.transforms.Resample(orig_freq=audio_fs,
                                                        new_freq=model_fs)(array)
                         array = array.squeeze(0).numpy()
+
                 data[name] = array
 
                 if self.preprocess is not None:
@@ -224,7 +228,7 @@
                 name = self.path_name_type_list[i][1]
                 _type = self.path_name_type_list[i][2]
                 if _type == "sound":
-                    audio_type = os.path.basename(value).split(".")[1].lower()
+                    audio_type = os.path.basename(value).split(".")[-1].lower()
                     if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
                         raise NotImplementedError(
                             f'Not supported audio type: {audio_type}')
@@ -238,11 +242,17 @@
                     model_fs = self.fs["model_fs"]
                     if audio_fs is not None and model_fs is not None:
                         array = torch.from_numpy(array)
-                        array = array.unsqueeze(0)
                         array = torchaudio.transforms.Resample(orig_freq=audio_fs,
                                                                new_freq=model_fs)(array)
-                        array = array.squeeze(0).numpy()
-                data[name] = array
+                        array = array.numpy()
+                        
+                if _type == "sound":
+                    if self.mc:
+                        data[name] = array.transpose((1, 0))
+                    else:
+                        data[name] = array[0]
+                else:
+                    data[name] = array
 
                 if self.preprocess is not None:
                     data = self.preprocess(uid, data)
@@ -326,7 +336,7 @@
                 # 2.a. Load data streamingly
                 for value, (path, name, _type) in zip(values, self.path_name_type_list):
                     if _type == "sound":
-                        audio_type = os.path.basename(value).split(".")[1].lower()
+                        audio_type = os.path.basename(value).split(".")[-1].lower()
                         if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
                             raise NotImplementedError(
                                 f'Not supported audio type: {audio_type}')
@@ -340,11 +350,16 @@
                         model_fs = self.fs["model_fs"]
                         if audio_fs is not None and model_fs is not None:
                             array = torch.from_numpy(array)
-                            array = array.unsqueeze(0)
                             array = torchaudio.transforms.Resample(orig_freq=audio_fs,
                                                                    new_freq=model_fs)(array)
-                            array = array.squeeze(0).numpy()
-                    data[name] = array
+                            array = array.numpy()
+                    if _type == "sound":
+                        if self.mc:
+                            data[name] = array.transpose((1, 0))
+                        else:
+                            data[name] = array[0]
+                    else:
+                        data[name] = array
                 if self.non_iterable_dataset is not None:
                     # 2.b. Load data from non-iterable dataset
                     _, from_non_iterable = self.non_iterable_dataset[uid]

--
Gitblit v1.9.1