From 1cfb26afc519a634194a4eaa02f9b0969f5c2cbf Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Wed, 09 Aug 2023 16:48:21 +0800
Subject: [PATCH] Merge pull request #790 from alibaba-damo-academy/dev_wjm_modelscope
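
Compute the shape-file / data-list paths before the preparation step and
return early when those files already exist, so repeated runs skip the
filtering and list-generation work; the distributed flag is now read only
where it is first used.

A minimal sketch of the resulting check-then-skip flow (illustrative only,
not code from this patch; the function name and the literal file names are
placeholders):

    import os

    def prepare_data_sketch(data_dir, train_set, valid_set, dataset_type):
        # Build the expected output paths up front.
        if dataset_type == "small":
            train_shape = os.path.join(data_dir, train_set, "speech_shape")
            valid_shape = os.path.join(data_dir, valid_set, "speech_shape")
            if os.path.exists(train_shape):
                assert os.path.exists(valid_shape)
                print("shape file for small dataset already exists.")
                return  # skip regeneration on repeated runs
        else:
            data_list = os.path.join(data_dir, train_set, "speech_text_data.list")
            if os.path.exists(data_list):
                print("data list for large dataset already exists.")
                return
        # First run only: filter inputs and generate shape files / data lists.
        print("generating data files ...")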
---
funasr/utils/prepare_data.py | 41 +++++++++++++++++++++++++----------------
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/funasr/utils/prepare_data.py b/funasr/utils/prepare_data.py
index 8d82a2f..c9615e7 100644
--- a/funasr/utils/prepare_data.py
+++ b/funasr/utils/prepare_data.py
@@ -195,11 +195,35 @@
def prepare_data(args, distributed_option):
- distributed = distributed_option.distributed
data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
file_names = args.data_file_names.split(",")
batch_type = args.dataset_conf["batch_conf"]["batch_type"]
+ print("data_names: {}, data_types: {}, file_names: {}".format(data_names, data_types, file_names))
+ assert len(data_names) == len(data_types) == len(file_names)
+ if args.dataset_type == "small":
+ args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "{}_shape".format(data_names[0]))]
+ args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "{}_shape".format(data_names[0]))]
+ args.train_data_path_and_name_and_type, args.valid_data_path_and_name_and_type = [], []
+ for file_name, data_name, data_type in zip(file_names, data_names, data_types):
+ args.train_data_path_and_name_and_type.append(
+ ["{}/{}/{}".format(args.data_dir, args.train_set, file_name), data_name, data_type])
+ args.valid_data_path_and_name_and_type.append(
+ ["{}/{}/{}".format(args.data_dir, args.valid_set, file_name), data_name, data_type])
+ if os.path.exists(args.train_shape_file[0]):
+ assert os.path.exists(args.valid_shape_file[0])
+ print('shape file for small dataset already exists.')
+ return
+ else:
+ concat_data_name = "_".join(data_names)
+ args.train_data_file = os.path.join(args.data_dir, args.train_set, "{}_data.list".format(concat_data_name))
+ args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "{}_data.list".format(concat_data_name))
+ if os.path.exists(args.train_data_file):
+ assert os.path.exists(args.valid_data_file)
+ print('data list for large dataset already exists.')
+ return
+
+ distributed = distributed_option.distributed
if not distributed or distributed_option.dist_rank == 0:
if hasattr(args, "filter_input") and args.filter_input:
filter_wav_text(args.data_dir, args.train_set)
@@ -213,20 +237,5 @@
generate_data_list(args, args.data_dir, args.train_set)
generate_data_list(args, args.data_dir, args.valid_set)
- print("data_names: {}, data_types: {}, file_names: {}".format(data_names, data_types, file_names))
- assert len(data_names) == len(data_types) == len(file_names)
- if args.dataset_type == "small":
- args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "{}_shape".format(data_names[0]))]
- args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "{}_shape".format(data_names[0]))]
- args.train_data_path_and_name_and_type, args.valid_data_path_and_name_and_type = [], []
- for file_name, data_name, data_type in zip(file_names, data_names, data_types):
- args.train_data_path_and_name_and_type.append(
- ["{}/{}/{}".format(args.data_dir, args.train_set, file_name), data_name, data_type])
- args.valid_data_path_and_name_and_type.append(
- ["{}/{}/{}".format(args.data_dir, args.valid_set, file_name), data_name, data_type])
- else:
- concat_data_name = "_".join(data_names)
- args.train_data_file = os.path.join(args.data_dir, args.train_set, "{}_data.list".format(concat_data_name))
- args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "{}_data.list".format(concat_data_name))
if distributed:
dist.barrier()
--
Gitblit v1.9.1