From 3d9f094e9652d4b84894c6fd4eae39a4a753b0f0 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 16 May 2023 23:48:00 +0800
Subject: [PATCH] train
---
funasr/utils/prepare_data.py | 147 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 121 insertions(+), 26 deletions(-)
diff --git a/funasr/utils/prepare_data.py b/funasr/utils/prepare_data.py
index a0d97f6..3f55170 100644
--- a/funasr/utils/prepare_data.py
+++ b/funasr/utils/prepare_data.py
@@ -1,9 +1,11 @@
-import os
import logging
+import os
+import shutil
from multiprocessing import Pool
import numpy as np
import torch.distributed as dist
+import torchaudio
def filter_wav_text(data_dir, dataset):
@@ -25,7 +27,7 @@
parts = line.strip().split()
if len(parts) < 2:
continue
- text_dict[parts[0]] = " ".join(parts[1:]).lower()
+ text_dict[parts[0]] = " ".join(parts[1:])
filter_count = 0
with open(wav_file, "w") as f_wav, open(text_file, "w") as f_text:
for sample_name, wav_path in wav_dict.items():
@@ -34,39 +36,50 @@
f_text.write(sample_name + " " + text_dict[sample_name] + "\n")
else:
filter_count += 1
- logging.info("{}/{} samples in {} are filtered because of the mismatch between wav.scp and text".format(len(wav_lines),
- filter_count,
- dataset))
+    logging.info("{}/{} samples in {} are filtered because of the mismatch between wav.scp and text".format(
+        filter_count, len(wav_lines), dataset))
-def calc_shape_core(root_path, frontend_conf, speech_length_min, speech_length_max, idx):
+def wav2num_frame(wav_path, frontend_conf):
+ waveform, sampling_rate = torchaudio.load(wav_path)
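+    # frames after low-frame-rate (LFR) stacking: duration_ms / (frame_shift_ms * lfr_n); feature dim stacks lfr_m fbank frames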
+ n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])
+ feature_dim = frontend_conf["n_mels"] * frontend_conf["lfr_m"]
+ return n_frames, feature_dim
+
+
+def calc_shape_core(root_path, args, idx):
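+    # compute (num_frames, feature_dim) for every utterance in split idx and write speech_shape.idx, with optional length filtering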
wav_scp_file = os.path.join(root_path, "wav.scp.{}".format(idx))
shape_file = os.path.join(root_path, "speech_shape.{}".format(idx))
with open(wav_scp_file) as f:
lines = f.readlines()
+ frontend_conf = args.frontend_conf
+ dataset_conf = args.dataset_conf
+    speech_length_min = dataset_conf.get("speech_length_min", -1)
+    speech_length_max = dataset_conf.get("speech_length_max", -1)
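+    # a value of -1 (not configured) disables the corresponding length filter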
with open(shape_file, "w") as f:
for line in lines:
sample_name, wav_path = line.strip().split()
- n_frames, feature_dim, speech_length = wav2num_frame(wav_path, frontend_conf)
+ n_frames, feature_dim = wav2num_frame(wav_path, frontend_conf)
write_flag = True
- if speech_length_min > 0 and speech_length < speech_length_min:
- write_flag = False
- if speech_length_max > 0 and speech_length > speech_length_max:
- write_flag = False
+            if n_frames > 0 and speech_length_min > 0:
+                write_flag = write_flag and n_frames >= speech_length_min
+            if n_frames > 0 and speech_length_max > 0:
+                write_flag = write_flag and n_frames <= speech_length_max
if write_flag:
f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
f.flush()
-def calc_shape(args, dataset, nj=32):
+def calc_shape(args, dataset, nj=64):
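+    # split wav.scp into nj pieces, compute per-piece shape files in parallel, then merge them into a single speech_shape file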
shape_path = os.path.join(args.data_dir, dataset, "speech_shape")
if os.path.exists(shape_path):
- print('Shape file for small dataset already exists.')
+ logging.info('Shape file for small dataset already exists.')
return
split_shape_path = os.path.join(args.data_dir, dataset, "shape_files")
-    if not os.path.exists(split_shape_path):
-        os.makedirs(split_shape_path, exist_ok=True)
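+    # start from a clean split directory so stale per-job files from a previous run are not merged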
+ if os.path.exists(split_shape_path):
+ shutil.rmtree(split_shape_path)
+ os.mkdir(split_shape_path)
# split
wav_scp_file = os.path.join(args.data_dir, dataset, "wav.scp")
@@ -77,7 +90,7 @@
start = 0
for i in range(nj):
end = start + num_job_lines
- file = os.path.join(shape_path, "wav.scp.{}".format(str(i + 1)))
+ file = os.path.join(split_shape_path, "wav.scp.{}".format(str(i + 1)))
with open(file, "w") as f:
if i == nj - 1:
f.writelines(lines[start:])
@@ -87,28 +100,110 @@
p = Pool(nj)
for i in range(nj):
- p.apply_async(calc_shape_core,
- args=(shape_path, frontend_conf, speech_length_min, speech_length_max, str(i + 1)))
- print('Generating shape files, please wait a few minutes...')
+ p.apply_async(calc_shape_core, args=(split_shape_path, args, str(i + 1)))
+ logging.info("Generating shape files, please wait a few minutes...")
p.close()
p.join()
# combine
- file = os.path.join(data_dir, dataset, "speech_shape")
- with open(file, "w") as f:
+ with open(shape_path, "w") as f:
for i in range(nj):
- job_file = os.path.join(shape_path, "speech_shape.{}".format(str(i + 1)))
+ job_file = os.path.join(split_shape_path, "speech_shape.{}".format(str(i + 1)))
with open(job_file) as job_f:
lines = job_f.readlines()
f.writelines(lines)
- print('Generating shape files done.')
+ logging.info('Generating shape files done.')
+
+
+def generate_data_list(data_dir, dataset, nj=64):
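+    # for large datasets: shard wav.scp and text into nj directories and write data.list, one "wav.scp text" pair per line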
+ list_file = os.path.join(data_dir, dataset, "data.list")
+ if os.path.exists(list_file):
+ logging.info('Data list for large dataset already exists.')
+ return
+ split_path = os.path.join(data_dir, dataset, "split")
+ if os.path.exists(split_path):
+ shutil.rmtree(split_path)
+ os.mkdir(split_path)
+
+ with open(os.path.join(data_dir, dataset, "wav.scp")) as f_wav:
+ wav_lines = f_wav.readlines()
+ with open(os.path.join(data_dir, dataset, "text")) as f_text:
+ text_lines = f_text.readlines()
+ num_lines = len(wav_lines)
+ num_job_lines = num_lines // nj
+ start = 0
+ for i in range(nj):
+ end = start + num_job_lines
+ split_path_nj = os.path.join(split_path, str(i + 1))
+ os.mkdir(split_path_nj)
+ wav_file = os.path.join(split_path_nj, "wav.scp")
+ text_file = os.path.join(split_path_nj, "text")
+ with open(wav_file, "w") as fw, open(text_file, "w") as ft:
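+            # the last shard takes all remaining lines, covering the case where num_lines is not divisible by nj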
+ if i == nj - 1:
+ fw.writelines(wav_lines[start:])
+ ft.writelines(text_lines[start:])
+ else:
+ fw.writelines(wav_lines[start:end])
+ ft.writelines(text_lines[start:end])
+ start = end
+
+ with open(list_file, "w") as f_data:
+ for i in range(nj):
+ wav_path = os.path.join(split_path, str(i + 1), "wav.scp")
+ text_path = os.path.join(split_path, str(i + 1), "text")
+ f_data.write(wav_path + " " + text_path + "\n")
def prepare_data(args, distributed_option):
distributed = distributed_option.distributed
if not distributed or distributed_option.dist_rank == 0:
filter_wav_text(args.data_dir, args.train_set)
- filter_wav_text(args.data_dir, args.dev_set)
- dist.barrier()
+ filter_wav_text(args.data_dir, args.valid_set)
- if args.dataset_type == "small" and args.train_shape_file is None:
+ if args.dataset_type == "small":
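+        # precompute speech_shape files; they are passed below as train/valid_shape_file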
+ calc_shape(args, args.train_set)
+ calc_shape(args, args.valid_set)
+
+ if args.dataset_type == "large":
+ generate_data_list(args.data_dir, args.train_set)
+ generate_data_list(args.data_dir, args.valid_set)
+
+ if args.dataset_type == "small":
+ args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "speech_shape")]
+ args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "speech_shape")]
+ data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
+ data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
+ args.train_data_path_and_name_and_type = [
+ ["{}/{}/wav.scp".format(args.data_dir, args.train_set), data_names[0], data_types[0]],
+ ["{}/{}/text".format(args.data_dir, args.train_set), data_names[1], data_types[1]]
+ ]
+ args.valid_data_path_and_name_and_type = [
+ ["{}/{}/wav.scp".format(args.data_dir, args.valid_set), data_names[0], data_types[0]],
+ ["{}/{}/text".format(args.data_dir, args.valid_set), data_names[1], data_types[1]]
+ ]
+ if args.embed_path is not None:
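+            # append precomputed embeddings (Kaldi scp/ark) as an extra data stream for train and valid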
+ args.train_data_path_and_name_and_type.append(
+ [os.path.join(args.embed_path, "embeds", args.train_set, "embeds.scp"), "embed", "kaldi_ark"])
+ args.valid_data_path_and_name_and_type.append(
+ [os.path.join(args.embed_path, "embeds", args.valid_set, "embeds.scp"), "embed", "kaldi_ark"])
+ else:
+ args.train_data_file = os.path.join(args.data_dir, args.train_set, "data.list")
+ args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data.list")
+ if args.embed_path is not None:
+ if not distributed or distributed_option.dist_rank == 0:
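+                # rank 0 rewrites each data.list entry as "wav.scp text embeds.N.ark" for the matching shard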
+ for d in [args.train_set, args.valid_set]:
+ file = os.path.join(args.data_dir, d, "data.list")
+ with open(file) as f:
+ lines = f.readlines()
+ out_file = os.path.join(args.data_dir, d, "data_with_embed.list")
+ with open(out_file, "w") as out_f:
+ for line in lines:
+ parts = line.strip().split()
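+                            # shard index is the parent directory of the split wav.scp path (e.g. .../split/3/wav.scp -> 3)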
+ idx = parts[0].split("/")[-2]
+                            embed_file = os.path.join(args.embed_path, "embeds", d, "ark",
+                                                      "embeds.{}.ark".format(idx))
+ out_f.write(parts[0] + " " + parts[1] + " " + embed_file + "\n")
+ args.train_data_file = os.path.join(args.data_dir, args.train_set, "data_with_embed.list")
+ args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data_with_embed.list")
+ if distributed:
+ dist.barrier()
--
Gitblit v1.9.1