From fcc9c89eaba9a4e36c54958aeedeec7ab3756cd7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 13 Feb 2023 17:43:31 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR; add "sound" data type, seg_dict and batch_mode options
---
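Notes:

The patch adds a "sound" data type (wav paths loaded with torchaudio), an optional
seg_dict argument that is forwarded to tokenize(), and a batch_mode switch that
selects padding or clipping when batches are assembled. Below is a minimal sketch of
a call against the new Dataset() signature; only the conf keys that the diff itself
reads (shuffle, data_names) are shown, and the vocab/seg_dict values are placeholders:

    # hypothetical usage of the new signature; names other than the keyword
    # arguments themselves are made up for illustration
    dataset = Dataset("train/data.list",
                      dict=vocab,              # token -> id mapping
                      seg_dict=None,           # optional word-segmentation dict for tokenize()
                      conf={"shuffle": True,
                            "data_names": "speech,text"},
                      mode="train",
                      batch_mode="clipping")   # or the default "padding"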
funasr/datasets/large_datasets/dataset.py | 37 +++++++++++++++++++++++++++++--------
1 file changed, 29 insertions(+), 8 deletions(-)
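For reference, the core of the new "sound" branch written out as standalone code:
each line of the scp-style list is "<key> <path>", torchaudio loads the file as a
(channels, samples) tensor, the first channel is kept as a 1-D numpy array, and
len_fn_token then measures length in milliseconds instead of frames. The file path
below is a made-up example:

    import torchaudio

    line = "utt1 /data/wav/utt1.wav"                     # hypothetical scp entry
    key, path = line.strip().split()
    waveform, sampling_rate = torchaudio.load(path)      # (channels, samples) float tensor
    mat = waveform.numpy()[0]                            # first channel as 1-D numpy array
    length_ms = (mat.shape[0] / sampling_rate) * 1000.   # what len_fn_token now returns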
diff --git a/funasr/datasets/large_datasets/dataset.py b/funasr/datasets/large_datasets/dataset.py
index 60c5abd..2123737 100644
--- a/funasr/datasets/large_datasets/dataset.py
+++ b/funasr/datasets/large_datasets/dataset.py
@@ -1,8 +1,10 @@
import os
import random
+import numpy
from functools import partial
import torch
+import torchaudio
import torch.distributed as dist
from kaldiio import ReadHelper
from torch.utils.data import IterableDataset
@@ -12,6 +14,7 @@
from funasr.datasets.large_datasets.datapipes.map import MapperIterDataPipe
from funasr.datasets.large_datasets.utils.filter import filter
from funasr.datasets.large_datasets.utils.padding import padding
+from funasr.datasets.large_datasets.utils.clipping import clipping
from funasr.datasets.large_datasets.utils.tokenize import tokenize
@@ -97,9 +100,11 @@
if data_type == "kaldi_ark":
ark_reader = ReadHelper('ark:{}'.format(data_file))
reader_list.append(ark_reader)
- elif data_type == "text":
+ elif data_type == "text" or data_type == "sound":
text_reader = open(data_file, "r")
reader_list.append(text_reader)
+ elif data_type == "none":
+ continue
else:
raise TypeError("Data type {} is not supported".format(data_type))
@@ -109,6 +114,15 @@
if data_type == "kaldi_ark":
key, mat = item
sample_dict[data_name] = mat
+ if data_name == "speech":
+ sample_dict["key"] = key
+ elif data_type == "sound":
+ key, path = item.strip().split()
+ waveform, sampling_rate = torchaudio.load(path)
+ waveform = waveform.numpy()
+ mat = waveform[0]
+ sample_dict[data_name] = mat
+ sample_dict["sampling_rate"] = sampling_rate
if data_name == "speech":
sample_dict["key"] = key
else:
@@ -125,13 +139,18 @@
def len_fn_token(data):
assert "speech" in data
- return data["speech"].shape[0]
+ if "sampling_rate" in data:
+ return (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
+ else:
+ return data["speech"].shape[0]
def Dataset(data_list_file,
dict,
+ seg_dict,
conf,
- mode="train"):
+ mode="train",
+ batch_mode="padding"):
scp_lists = read_lists(data_list_file)
shuffle = conf.get('shuffle', True)
data_names = conf.get("data_names", "speech,text")
@@ -142,9 +161,10 @@
filter_fn = partial(filter, **filter_conf)
dataset = FilterIterDataPipe(dataset, fn=filter_fn)
- vocab = {'vocab': dict}
- tokenize_fn = partial(tokenize, **vocab)
- dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
+ if "text" in data_names:
+ vocab = {'vocab': dict, 'seg_dict': seg_dict}
+ tokenize_fn = partial(tokenize, **vocab)
+ dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
if shuffle:
buffer_conf = conf.get('shuffle_conf', {})
@@ -168,8 +188,9 @@
batch_size=batch_size,
len_fn=len_fn,
buffer_size=buffer_size,
- sort_size=sort_size)
+ sort_size=sort_size,
+ batch_mode=batch_mode)
- dataset = MapperIterDataPipe(dataset, fn=padding)
+ dataset = MapperIterDataPipe(dataset, fn=padding if batch_mode == "padding" else clipping)
return dataset
--
Gitblit v1.9.1