From 450ed4f344fc6695336c36c2e4854152454c3d22 Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Sun, 05 Feb 2023 12:47:47 +0800
Subject: [PATCH] Merge pull request #61 from alibaba-damo-academy/dev_lhn

---
 funasr/bin/asr_inference_paraformer.py          |    2 ++
 funasr/bin/asr_inference_uniasr.py              |    2 ++
 funasr/bin/asr_inference.py                     |    2 ++
 funasr/bin/vad_inference.py                     |    3 +++
 funasr/tasks/abs_task.py                        |    2 ++
 funasr/datasets/iterable_dataset.py             |   31 +++++++++++++++++++++++++++++--
 funasr/bin/asr_inference_paraformer_vad_punc.py |    2 ++
 7 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/funasr/bin/asr_inference.py b/funasr/bin/asr_inference.py
index f7ad9ae..ca0201a 100644
--- a/funasr/bin/asr_inference.py
+++ b/funasr/bin/asr_inference.py
@@ -534,6 +534,7 @@
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,
                  param_dict: dict = None,
                  ):
         # 3. Build data-iterator
@@ -544,6 +545,7 @@
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,
+            fs=fs,
             batch_size=batch_size,
             key_file=key_file,
             num_workers=num_workers,
diff --git a/funasr/bin/asr_inference_paraformer.py b/funasr/bin/asr_inference_paraformer.py
index 0929436..1455517 100644
--- a/funasr/bin/asr_inference_paraformer.py
+++ b/funasr/bin/asr_inference_paraformer.py
@@ -579,6 +579,7 @@
             data_path_and_name_and_type,
             raw_inputs: Union[np.ndarray, torch.Tensor] = None,
             output_dir_v2: Optional[str] = None,
+            fs: dict = None,
             param_dict: dict = None,
     ):
         # 3. Build data-iterator
@@ -589,6 +590,7 @@
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,
+            fs=fs,
             batch_size=batch_size,
             key_file=key_file,
             num_workers=num_workers,
diff --git a/funasr/bin/asr_inference_paraformer_vad_punc.py b/funasr/bin/asr_inference_paraformer_vad_punc.py
index 10c1da6..2702617 100644
--- a/funasr/bin/asr_inference_paraformer_vad_punc.py
+++ b/funasr/bin/asr_inference_paraformer_vad_punc.py
@@ -548,6 +548,7 @@
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,
                  param_dict: dict = None,
                  ):
         # 3. Build data-iterator
@@ -558,6 +559,7 @@
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,
+            fs=fs,
             batch_size=1,
             key_file=key_file,
             num_workers=num_workers,
diff --git a/funasr/bin/asr_inference_uniasr.py b/funasr/bin/asr_inference_uniasr.py
index a4bdcc1..8a99058 100644
--- a/funasr/bin/asr_inference_uniasr.py
+++ b/funasr/bin/asr_inference_uniasr.py
@@ -575,6 +575,7 @@
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,
                  param_dict: dict = None,
                  ):
         # 3. Build data-iterator
@@ -585,6 +586,7 @@
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,
+            fs=fs,
             batch_size=batch_size,
             key_file=key_file,
             num_workers=num_workers,
diff --git a/funasr/bin/vad_inference.py b/funasr/bin/vad_inference.py
index 679cc0b..eb51400 100644
--- a/funasr/bin/vad_inference.py
+++ b/funasr/bin/vad_inference.py
@@ -251,6 +251,7 @@
         dtype: str = "float32",
         seed: int = 0,
         num_workers: int = 1,
+        param_dict: dict = None,
         **kwargs,
 ):
     assert check_argument_types()
@@ -287,6 +288,8 @@
         data_path_and_name_and_type,
         raw_inputs: Union[np.ndarray, torch.Tensor] = None,
         output_dir_v2: Optional[str] = None,
+        fs: dict = None,
+        param_dict: dict = None,
     ):
         # 3. Build data-iterator
         loader = VADTask.build_streaming_iterator(
diff --git a/funasr/datasets/iterable_dataset.py b/funasr/datasets/iterable_dataset.py
index 1fc9270..2ac37b2 100644
--- a/funasr/datasets/iterable_dataset.py
+++ b/funasr/datasets/iterable_dataset.py
@@ -11,7 +11,6 @@
 
 import kaldiio
 import numpy as np
-import soundfile
 import torch
 import torchaudio
 from torch.utils.data.dataset import IterableDataset
@@ -101,6 +100,7 @@
                 [str, Dict[str, np.ndarray]], Dict[str, np.ndarray]
             ] = None,
             float_dtype: str = "float32",
+            fs: dict = None,
             int_dtype: str = "long",
             key_file: str = None,
     ):
@@ -116,6 +116,7 @@
         self.float_dtype = float_dtype
         self.int_dtype = int_dtype
         self.key_file = key_file
+        self.fs = fs
 
         self.debug_info = {}
         non_iterable_list = []
@@ -175,6 +176,15 @@
             _type = self.path_name_type_list[0][2]
             func = DATA_TYPES[_type]
             array = func(value)
+            if self.fs is not None and name == "speech":
+                audio_fs = self.fs["audio_fs"]
+                model_fs = self.fs["model_fs"]
+                if audio_fs is not None and model_fs is not None:
+                    array = torch.from_numpy(array)
+                    array = array.unsqueeze(0)
+                    array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                   new_freq=model_fs)(array)
+                    array = array.squeeze(0).numpy()
             data[name] = array
 
             if self.preprocess is not None:
@@ -211,6 +221,15 @@
                         f'Not supported audio type: {audio_type}')
             func = DATA_TYPES[_type]
             array = func(value)
+            if self.fs is not None and name == "speech":
+                audio_fs = self.fs["audio_fs"]
+                model_fs = self.fs["model_fs"]
+                if audio_fs is not None and model_fs is not None:
+                    array = torch.from_numpy(array)
+                    array = array.unsqueeze(0)
+                    array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                           new_freq=model_fs)(array)
+                    array = array.squeeze(0).numpy()
             data[name] = array
 
             if self.preprocess is not None:
@@ -302,6 +321,15 @@
                     func = DATA_TYPES[_type]
                     # Load entry
                     array = func(value)
+                    if self.fs is not None and name == "speech":
+                        audio_fs = self.fs["audio_fs"]
+                        model_fs = self.fs["model_fs"]
+                        if audio_fs is not None and model_fs is not None:
+                            array = torch.from_numpy(array)
+                            array = array.unsqueeze(0)
+                            array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                                   new_freq=model_fs)(array)
+                            array = array.squeeze(0).numpy()
                     data[name] = array
                 if self.non_iterable_dataset is not None:
                     # 2.b. Load data from non-iterable dataset
@@ -335,4 +363,3 @@
 
         if count == 0:
             raise RuntimeError("No iteration")
-
diff --git a/funasr/tasks/abs_task.py b/funasr/tasks/abs_task.py
index 5424f13..4e79c63 100644
--- a/funasr/tasks/abs_task.py
+++ b/funasr/tasks/abs_task.py
@@ -1783,6 +1783,7 @@
             collate_fn,
             key_file: str = None,
             batch_size: int = 1,
+            fs: dict = None,
             dtype: str = np.float32,
             num_workers: int = 1,
             allow_variable_data_keys: bool = False,
@@ -1800,6 +1801,7 @@
         dataset = IterableESPnetDataset(
             data_path_and_name_and_type,
             float_dtype=dtype,
+            fs=fs,
             preprocess=preprocess_fn,
             key_file=key_file,
         )

--
Gitblit v1.9.1