| | |
# Parse the comma-separated dataset configuration into three parallel lists:
# one data name / data type / file name per input stream (e.g. speech + text).
data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
file_names = args.data_file_names.split(",")
print("data_names: {}, data_types: {}, file_names: {}".format(data_names, data_types, file_names))
# The three lists are zipped together below, so they must align one-to-one.
assert len(data_names) == len(data_types) == len(file_names)
if args.dataset_type == "small":
    # Shape files are keyed by the first data name (e.g. "speech_shape").
    args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "{}_shape".format(data_names[0]))]
    # BUGFIX: removed a duplicated assignment to args.valid_shape_file whose
    # format string was the malformed "{}}_shape" — str.format raises
    # ValueError ("Single '}' encountered") on a lone unescaped '}', so that
    # line crashed at runtime before the corrected assignment below ran.
    args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "{}_shape".format(data_names[0]))]
# Reset the (path, name, type) triple lists; they are filled by the loop below.
args.train_data_path_and_name_and_type, args.valid_data_path_and_name_and_type = [], []
| | | for file_name, data_name, data_type in zip(file_names, data_names, data_types): |
| | | args.train_data_path_and_name_and_type.append( |