# ---------------------------------------------------------------------------
# Decoding (inference) configuration.
# NOTE(review): this section appeared twice verbatim in the original; the
# duplicate has been removed. `inference_asr_model` was also assigned twice
# in a row (.pth then .pb) — only the last assignment takes effect in bash,
# so the effective .pb value is kept and the shadowed .pth value is left as
# a commented alternative, matching the inference_config comment style.
# ---------------------------------------------------------------------------
inference_config=conf/decode_asr_transformer.yaml
#inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml

# Checkpoint averaged over the 10 best epochs by validation accuracy.
inference_asr_model=valid.acc.ave_10best.pb
#inference_asr_model=valid.acc.ave_10best.pth

# you can set gpu num for decoding here
gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training stage by default
# Resolve the dataset field names, their loader types, and the backing data
# file names from comma-separated config strings; the three lists must align
# one-to-one (asserted below).
data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
file_names = args.data_file_names.split(",")
print("data_names: {}, data_types: {}, file_names: {}".format(data_names, data_types, file_names))
assert len(data_names) == len(data_types) == len(file_names)
if args.dataset_type == "small":
    # Shape files describe per-sample lengths for the first data field
    # (e.g. "speech_shape" under each dataset directory).
    args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "{}_shape".format(data_names[0]))]
    # NOTE(review): removed a dead duplicate assignment of valid_shape_file
    # whose format string was malformed ("{}}_shape"); it was immediately
    # overwritten by this correct assignment, so behavior is unchanged.
    args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "{}_shape".format(data_names[0]))]
    args.train_data_path_and_name_and_type, args.valid_data_path_and_name_and_type = [], []
| | | for file_name, data_name, data_type in zip(file_names, data_names, data_types): |
| | | args.train_data_path_and_name_and_type.append( |