From 7da5b31e25845905b814dfa6282ebf09ada329d5 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 27 Jun 2023 16:57:43 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR add
---
funasr/datasets/large_datasets/dataset.py | 4
funasr/utils/wav_utils.py | 8 +
funasr/datasets/iterable_dataset.py | 5
docs/benchmark/benchmark_pipeline_cer.md | 254 +++++++++++++++++++++++++-------------------------
funasr/bin/asr_inference_launch.py | 5
5 files changed, 143 insertions(+), 133 deletions(-)
diff --git a/docs/benchmark/benchmark_pipeline_cer.md b/docs/benchmark/benchmark_pipeline_cer.md
index 97776a6..d978f3e 100644
--- a/docs/benchmark/benchmark_pipeline_cer.md
+++ b/docs/benchmark/benchmark_pipeline_cer.md
@@ -45,156 +45,156 @@
### Chinese Dataset
-<table>
+<table border="1">
<tr align="center">
- <td>Model</td>
- <td>Offline/Online</td>
- <td colspan="2">Aishell1</td>
- <td colspan="4">Aishell2</td>
- <td colspan="3">WenetSpeech</td>
+ <td style="border: 1px solid">Model</td>
+ <td style="border: 1px solid">Offline/Online</td>
+ <td colspan="2" style="border: 1px solid">Aishell1</td>
+ <td colspan="4" style="border: 1px solid">Aishell2</td>
+ <td colspan="3" style="border: 1px solid">WenetSpeech</td>
</tr>
<tr align="center">
- <td></td>
- <td></td>
- <td>dev</td>
- <td>test</td>
- <td>dev_ios</td>
- <td>test_ios</td>
- <td>test_android</td>
- <td>test_mic</td>
- <td>dev</td>
- <td>test_meeting</td>
- <td>test_net</td>
+ <td style="border: 1px solid"></td>
+ <td style="border: 1px solid"></td>
+ <td style="border: 1px solid">dev</td>
+ <td style="border: 1px solid">test</td>
+ <td style="border: 1px solid">dev_ios</td>
+ <td style="border: 1px solid">test_ios</td>
+ <td style="border: 1px solid">test_android</td>
+ <td style="border: 1px solid">test_mic</td>
+ <td style="border: 1px solid">dev</td>
+ <td style="border: 1px solid">test_meeting</td>
+ <td style="border: 1px solid">test_net</td>
</tr>
<tr align="center">
- <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large</a> </td>
- <td>Offline</td>
- <td>1.76</td>
- <td>1.94</td>
- <td>2.79</td>
- <td>2.84</td>
- <td>3.08</td>
- <td>3.03</td>
- <td>3.43</td>
- <td>7.01</td>
- <td>6.66</td>
+ <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">1.76</td>
+ <td style="border: 1px solid">1.94</td>
+ <td style="border: 1px solid">2.79</td>
+ <td style="border: 1px solid">2.84</td>
+ <td style="border: 1px solid">3.08</td>
+ <td style="border: 1px solid">3.03</td>
+ <td style="border: 1px solid">3.43</td>
+ <td style="border: 1px solid">7.01</td>
+ <td style="border: 1px solid">6.66</td>
</tr>
<tr align="center">
- <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large-long</a> </td>
- <td>Offline</td>
- <td>1.80</td>
- <td>2.10</td>
- <td>2.78</td>
- <td>2.87</td>
- <td>3.12</td>
- <td>3.11</td>
- <td>3.44</td>
- <td>13.28</td>
- <td>7.08</td>
+ <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large-long</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">1.80</td>
+ <td style="border: 1px solid">2.10</td>
+ <td style="border: 1px solid">2.78</td>
+ <td style="border: 1px solid">2.87</td>
+ <td style="border: 1px solid">3.12</td>
+ <td style="border: 1px solid">3.11</td>
+ <td style="border: 1px solid">3.44</td>
+ <td style="border: 1px solid">13.28</td>
+ <td style="border: 1px solid">7.08</td>
</tr>
<tr align="center">
- <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary">Paraformer-large-contextual</a> </td>
- <td>Offline</td>
- <td>1.76</td>
- <td>2.02</td>
- <td>2.73</td>
- <td>2.85</td>
- <td>2.98</td>
- <td>2.95</td>
- <td>3.42</td>
- <td>7.16</td>
- <td>6.72</td>
+ <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary">Paraformer-large-contextual</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">1.76</td>
+ <td style="border: 1px solid">2.02</td>
+ <td style="border: 1px solid">2.73</td>
+ <td style="border: 1px solid">2.85</td>
+ <td style="border: 1px solid">2.98</td>
+ <td style="border: 1px solid">2.95</td>
+ <td style="border: 1px solid">3.42</td>
+ <td style="border: 1px solid">7.16</td>
+ <td style="border: 1px solid">6.72</td>
</tr>
<tr align="center">
- <td> <a href="https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary">Paraformer</a> </td>
- <td>Offline</td>
- <td>3.24</td>
- <td>3.69</td>
- <td>4.58</td>
- <td>4.63</td>
- <td>4.83</td>
- <td>4.71</td>
- <td>4.19</td>
- <td>8.32</td>
- <td>9.19</td>
+ <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary">Paraformer</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">3.24</td>
+ <td style="border: 1px solid">3.69</td>
+ <td style="border: 1px solid">4.58</td>
+ <td style="border: 1px solid">4.63</td>
+ <td style="border: 1px solid">4.83</td>
+ <td style="border: 1px solid">4.71</td>
+ <td style="border: 1px solid">4.19</td>
+ <td style="border: 1px solid">8.32</td>
+ <td style="border: 1px solid">9.19</td>
</tr>
<tr align="center">
- <td> <a href="https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/summary">UniASR</a> </td>
- <td>Online</td>
- <td>3.34</td>
- <td>3.99</td>
- <td>4.62</td>
- <td>4.52</td>
- <td>4.77</td>
- <td>4.73</td>
- <td>4.51</td>
- <td>10.63</td>
- <td>9.70</td>
+ <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/summary">UniASR</a> </td>
+ <td style="border: 1px solid">Online</td>
+ <td style="border: 1px solid">3.34</td>
+ <td style="border: 1px solid">3.99</td>
+ <td style="border: 1px solid">4.62</td>
+ <td style="border: 1px solid">4.52</td>
+ <td style="border: 1px solid">4.77</td>
+ <td style="border: 1px solid">4.73</td>
+ <td style="border: 1px solid">4.51</td>
+ <td style="border: 1px solid">10.63</td>
+ <td style="border: 1px solid">9.70</td>
</tr>
<tr align="center">
- <td> <a href="https://modelscope.cn/models/damo/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary">UniASR-large</a> </td>
- <td>Offline</td>
- <td>2.93</td>
- <td>3.48</td>
- <td>3.95</td>
- <td>3.87</td>
- <td>4.11</td>
- <td>4.11</td>
- <td>4.16</td>
- <td>10.09</td>
- <td>8.69</td>
+ <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary">UniASR-large</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">2.93</td>
+ <td style="border: 1px solid">3.48</td>
+ <td style="border: 1px solid">3.95</td>
+ <td style="border: 1px solid">3.87</td>
+ <td style="border: 1px solid">4.11</td>
+ <td style="border: 1px solid">4.11</td>
+ <td style="border: 1px solid">4.16</td>
+ <td style="border: 1px solid">10.09</td>
+ <td style="border: 1px solid">8.69</td>
</tr>
<tr align="center">
- <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-aishell1-pytorch/summary">Paraformer-aishell</a> </td>
- <td>Offline</td>
- <td>4.88</td>
- <td>5.43</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
+ <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-aishell1-pytorch/summary">Paraformer-aishell</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">4.88</td>
+ <td style="border: 1px solid">5.43</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
</tr>
<tr align="center">
- <td> <a href="https://modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary">ParaformerBert-aishell</a> </td>
- <td>Offline</td>
- <td>6.14</td>
- <td>7.01</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
+ <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary">ParaformerBert-aishell</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">6.14</td>
+ <td style="border: 1px solid">7.01</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
</tr>
<tr align="center">
- <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">Paraformer-aishell2</a> </td>
- <td>Offline</td>
- <td>-</td>
- <td>-</td>
- <td>5.82</td>
- <td>6.30</td>
- <td>6.60</td>
- <td>5.83</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
+ <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">Paraformer-aishell2</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">5.82</td>
+ <td style="border: 1px solid">6.30</td>
+ <td style="border: 1px solid">6.60</td>
+ <td style="border: 1px solid">5.83</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
</tr>
<tr align="center">
- <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">ParaformerBert-aishell2</a> </td>
- <td>Offline</td>
- <td>-</td>
- <td>-</td>
- <td>4.95</td>
- <td>5.45</td>
- <td>5.59</td>
- <td>5.83</td>
- <td>-</td>
- <td>-</td>
- <td>-</td>
+ <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">ParaformerBert-aishell2</a> </td>
+ <td style="border: 1px solid">Offline</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">4.95</td>
+ <td style="border: 1px solid">5.45</td>
+ <td style="border: 1px solid">5.59</td>
+ <td style="border: 1px solid">5.83</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
+ <td style="border: 1px solid">-</td>
</tr>
</table>
diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
index ce1f984..5d1b804 100644
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -867,7 +867,10 @@
try:
raw_inputs = torchaudio.load(data_path_and_name_and_type[0])[0][0]
except:
- raw_inputs = torch.tensor(soundfile.read(data_path_and_name_and_type[0])[0])
+ raw_inputs = soundfile.read(data_path_and_name_and_type[0], dtype='float32')[0]
+ if raw_inputs.ndim == 2:
+ raw_inputs = raw_inputs[:, 0]
+ raw_inputs = torch.tensor(raw_inputs)
if data_path_and_name_and_type is None and raw_inputs is not None:
if isinstance(raw_inputs, np.ndarray):
raw_inputs = torch.tensor(raw_inputs)
diff --git a/funasr/datasets/iterable_dataset.py b/funasr/datasets/iterable_dataset.py
index fa0f0c7..d240d93 100644
--- a/funasr/datasets/iterable_dataset.py
+++ b/funasr/datasets/iterable_dataset.py
@@ -71,7 +71,10 @@
try:
return torchaudio.load(input)[0].numpy()
except:
- return np.expand_dims(soundfile.read(input)[0], axis=0)
+ waveform, _ = soundfile.read(input, dtype='float32')
+ if waveform.ndim == 2:
+ waveform = waveform[:, 0]
+ return np.expand_dims(waveform, axis=0)
DATA_TYPES = {
"sound": load_wav,
diff --git a/funasr/datasets/large_datasets/dataset.py b/funasr/datasets/large_datasets/dataset.py
index 844dde7..5f2c2c6 100644
--- a/funasr/datasets/large_datasets/dataset.py
+++ b/funasr/datasets/large_datasets/dataset.py
@@ -128,7 +128,9 @@
try:
waveform, sampling_rate = torchaudio.load(path)
except:
- waveform, sampling_rate = soundfile.read(path)
+ waveform, sampling_rate = soundfile.read(path, dtype='float32')
+ if waveform.ndim == 2:
+ waveform = waveform[:, 0]
waveform = np.expand_dims(waveform, axis=0)
waveform = torch.tensor(waveform)
if self.frontend_conf is not None:
diff --git a/funasr/utils/wav_utils.py b/funasr/utils/wav_utils.py
index a6e394f..bd067c2 100644
--- a/funasr/utils/wav_utils.py
+++ b/funasr/utils/wav_utils.py
@@ -166,7 +166,9 @@
try:
waveform, audio_sr = torchaudio.load(wav_file)
except:
- waveform, audio_sr = soundfile.read(wav_file)
+ waveform, audio_sr = soundfile.read(wav_file, dtype='float32')
+ if waveform.ndim == 2:
+ waveform = waveform[:, 0]
waveform = torch.tensor(np.expand_dims(waveform, axis=0))
waveform = waveform * (1 << 15)
waveform = torch_resample(waveform, audio_sr, model_sr)
@@ -187,9 +189,9 @@
def wav2num_frame(wav_path, frontend_conf):
try:
- waveform, audio_sr = torchaudio.load(wav_file)
+ waveform, sampling_rate = torchaudio.load(wav_path)
except:
- waveform, audio_sr = soundfile.read(wav_file)
+ waveform, sampling_rate = soundfile.read(wav_path)
waveform = torch.tensor(np.expand_dims(waveform, axis=0))
speech_length = (waveform.shape[1] / sampling_rate) * 1000.
n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])
--
Gitblit v1.9.1