From 9b4e9cc8a0311e5243d69b73ed073e7ea441982e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 27 Mar 2024 16:05:29 +0800
Subject: [PATCH] train update: librosa audio fallback, UTF-8 text reads, filter after tokenize
---
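Notes (below the ---, so not part of the commit message): this patch (1) swaps the soundfile fallback for librosa in the audio loader, (2) opens text/hotword files explicitly as UTF-8, and (3) moves length filtering after tokenization. A minimal sketch of the patched fallback path follows; the helper name load_audio is hypothetical, not FunASR API. Note that sr=None matters: unlike soundfile.read, librosa.load would otherwise resample everything to 22050 Hz by default.

    import librosa
    import numpy as np
    import torchaudio

    def load_audio(path):
        # hypothetical helper illustrating the patched fallback
        try:
            # torchaudio returns a (channels, frames) float tensor and the rate
            waveform, sampling_rate = torchaudio.load(path)
        except Exception:
            # librosa.load returns a 1-D mono float32 array and the rate;
            # sr=None keeps the native rate instead of resampling to 22050 Hz
            waveform, sampling_rate = librosa.load(path, sr=None, dtype='float32')
            waveform = np.expand_dims(waveform, axis=0)  # (1, frames), matching torchaudio
        return waveform, sampling_rate
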
funasr/datasets/large_datasets/dataset.py | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/funasr/datasets/large_datasets/dataset.py b/funasr/datasets/large_datasets/dataset.py
index 1e9bb26..d3489c1 100644
--- a/funasr/datasets/large_datasets/dataset.py
+++ b/funasr/datasets/large_datasets/dataset.py
@@ -7,7 +7,7 @@
import torch.distributed as dist
import torchaudio
import numpy as np
-import soundfile
+import librosa
from kaldiio import ReadHelper
from torch.utils.data import IterableDataset
@@ -108,7 +108,7 @@
ark_reader = ReadHelper('ark:{}'.format(data_file))
reader_list.append(ark_reader)
elif data_type == "text" or data_type == "sound" or data_type == 'text_hotword':
- text_reader = open(data_file, "r")
+ text_reader = open(data_file, "r", encoding="utf-8")
reader_list.append(text_reader)
elif data_type == "none":
continue
@@ -128,7 +128,7 @@
try:
waveform, sampling_rate = torchaudio.load(path)
except:
- waveform, sampling_rate = soundfile.read(path, dtype='float32')
+ waveform, sampling_rate = librosa.load(path, sr=None, dtype='float32')  # sr=None keeps the native sampling rate
if waveform.ndim == 2:
waveform = waveform[:, 0]
waveform = np.expand_dims(waveform, axis=0)
@@ -205,7 +205,7 @@
# pre_prob = conf.get("pre_prob", 0) # unused yet
if pre_hwfile is not None:
pre_hwlist = []
- with open(pre_hwfile, 'r') as fin:
+ with open(pre_hwfile, 'r', encoding="utf-8") as fin:
for line in fin.readlines():
pre_hwlist.append(line.strip())
else:
@@ -229,15 +229,15 @@
mode=mode,
)
- filter_conf = conf.get('filter_conf', {})
- filter_fn = partial(filter, **filter_conf)
- dataset = FilterIterDataPipe(dataset, fn=filter_fn)
-
if "text" in data_names:
vocab = {'vocab': dict, 'seg_dict': seg_dict, 'punc_dict': punc_dict, 'bpe_tokenizer': bpe_tokenizer, 'hw_config': hw_config}
tokenize_fn = partial(tokenize, **vocab)
dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
+ filter_conf = conf.get('filter_conf', {})
+ filter_fn = partial(filter, **filter_conf)
+ dataset = FilterIterDataPipe(dataset, fn=filter_fn)
+
if shuffle:
buffer_conf = conf.get('shuffle_conf', {})
buffer_size = buffer_conf['shuffle_size']
--
Gitblit v1.9.1
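
Review note: the last hunk moves the FilterIterDataPipe stage after the tokenize MapperIterDataPipe, presumably so the filter can act on fields that only exist after tokenization (e.g. token lengths). A self-contained sketch of that ordering, using the builtin map/filter as stand-ins for the FunASR pipes; tokenize, keep, and max_token_length here are hypothetical illustrations:

    from functools import partial

    def tokenize(sample, vocab):
        # stand-in for the patch's tokenize_fn: adds token ids to the sample
        sample["text_ids"] = [vocab[ch] for ch in sample["text"]]
        return sample

    def keep(sample, max_token_length=40):
        # length-based filtering only works once text_ids exists,
        # hence tokenize must run before the filter stage
        return len(sample["text_ids"]) <= max_token_length

    vocab = {"a": 0, "b": 1}
    samples = [{"text": "ab"}, {"text": "a" * 100}]
    tokenized = map(partial(tokenize, vocab=vocab), samples)
    kept = filter(partial(keep, max_token_length=40), tokenized)
    print([s["text"] for s in kept])  # -> ['ab']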