# network architecture

# encoder related
encoder: transformer
encoder_conf:
    output_size: 256    # dimension of attention
    attention_heads: 4
    linear_units: 2048  # the number of units of position-wise feed forward
    num_blocks: 12      # the number of encoder blocks
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d # encoder architecture type
    normalize_before: true

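The `conv2d` input layer is conventionally two 3x3 convolutions with stride 2 (as in the common Conv2dSubsampling front-end), which downsamples the time axis by roughly 4x before the transformer blocks. A minimal sketch of the resulting length arithmetic, assuming that convention (exact padding behavior varies by toolkit):

```python
def subsampled_length(num_frames: int) -> int:
    # Two kernel-3, stride-2, no-padding convolutions: each maps L -> (L - 1) // 2.
    return ((num_frames - 1) // 2 - 1) // 2

# e.g. a 1000-frame utterance becomes 249 encoder frames
print(subsampled_length(1000))  # 249
```
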
# decoder related
decoder: transformer
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0

# hybrid CTC/attention
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1     # label smoothing option
    length_normalized_loss: false

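With `ctc_weight: 0.3`, training interpolates the CTC loss and the attention (decoder cross-entropy) loss, and `lsm_weight` applies label smoothing to the decoder targets. A minimal sketch of the standard hybrid objective (tensor names here are placeholders, not the toolkit's API):

```python
import torch

def hybrid_loss(loss_ctc: torch.Tensor,
                loss_att: torch.Tensor,
                ctc_weight: float = 0.3) -> torch.Tensor:
    # loss = w * L_ctc + (1 - w) * L_att -- the usual hybrid CTC/attention objective
    return ctc_weight * loss_ctc + (1.0 - ctc_weight) * loss_att

# lsm_weight: 0.1 corresponds to label smoothing on the attention branch, e.g.:
ce = torch.nn.CrossEntropyLoss(label_smoothing=0.1)  # PyTorch >= 1.10
```

With `length_normalized_loss: false`, the attention loss is typically averaged per utterance rather than per token.
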
# optimization related
accum_grad: 2
grad_clip: 5
patience: none
max_epoch: 50
val_scheduler_criterion:
    - valid
    - acc
best_model_criterion:
-   - valid
    - acc
    - max
keep_nbest_models: 10

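`accum_grad: 2` accumulates gradients over two mini-batches before each optimizer step (doubling the effective batch size), and `grad_clip: 5` caps the global gradient norm. A runnable sketch of how these two settings typically interact in the training loop (the tiny model and data here are stand-ins):

```python
import torch

model = torch.nn.Linear(10, 2)                       # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=0.002)
loader = [(torch.randn(4, 10), torch.randint(0, 2, (4,))) for _ in range(8)]
criterion = torch.nn.CrossEntropyLoss()

accum_grad, grad_clip = 2, 5.0

for step, (x, y) in enumerate(loader, start=1):
    loss = criterion(model(x), y) / accum_grad       # average gradients over accum steps
    loss.backward()
    if step % accum_grad == 0:                       # update every accum_grad mini-batches
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        optimizer.step()
        optimizer.zero_grad()
```
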
optim: adam
optim_conf:
    lr: 0.002
scheduler: warmuplr     # pytorch v1.1.0+ required
scheduler_conf:
    warmup_steps: 25000

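`warmuplr` is the Noam-style schedule: the learning rate rises linearly to the configured `lr` at `warmup_steps`, then decays with the inverse square root of the step. A sketch of the formula as commonly implemented (e.g. ESPnet's WarmupLR):

```python
def warmup_lr(step: int, lr: float = 0.002, warmup_steps: int = 25000) -> float:
    # Linear warmup to `lr` at step == warmup_steps, then inverse-sqrt decay.
    return lr * warmup_steps ** 0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)

print(warmup_lr(1))        # tiny LR at the start of warmup
print(warmup_lr(25000))    # 0.002 at the peak
print(warmup_lr(100000))   # 0.001 after decay
```
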
specaug: specaug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2

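Per this `specaug_conf`, each spectrogram receives time warping plus up to two frequency masks of width 0-30 bins and two time masks of width 0-40 frames. A minimal numpy sketch of the masking part (the warp step is omitted; exact sampling details vary by implementation):

```python
import numpy as np

def apply_specaug_masks(spec: np.ndarray,
                        num_freq_mask: int = 2, max_f: int = 30,
                        num_time_mask: int = 2, max_t: int = 40) -> np.ndarray:
    """spec: (time, freq) features; returns a copy with masked regions zeroed."""
    spec = spec.copy()
    T, F = spec.shape
    rng = np.random.default_rng()
    for _ in range(num_freq_mask):
        f = int(rng.integers(0, max_f + 1))          # mask width in [0, 30]
        f0 = int(rng.integers(0, max(1, F - f)))
        spec[:, f0:f0 + f] = 0.0
    for _ in range(num_time_mask):
        t = int(rng.integers(0, max_t + 1))          # mask width in [0, 40]
        t0 = int(rng.integers(0, max(1, T - t)))
        spec[t0:t0 + t, :] = 0.0
    return spec
```
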
log_interval: 50
normalize: none

dataset_conf:
    shuffle: true
    shuffle_conf:
        shuffle_size: 2048
        sort_size: 500
    batch_conf:
        batch_type: token
        batch_size: 25000
    num_workers: 8
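
With `batch_type: token`, utterances are grouped until a batch reaches roughly `batch_size` tokens (frames) rather than a fixed utterance count; the shuffle buffer randomizes order while sorting within small windows keeps similar lengths together and reduces padding. A rough in-memory sketch of the idea (real loaders stream, and some toolkits budget by longest-utterance x count instead of summed frames):

```python
import random

def token_batches(lengths, batch_size=25000, shuffle_size=2048, sort_size=500):
    """lengths: list of (utt_id, num_frames) pairs. Yields lists of utt_ids."""
    idx = list(range(len(lengths)))
    # local shuffle over windows of `shuffle_size` utterances
    for s in range(0, len(idx), shuffle_size):
        window = idx[s:s + shuffle_size]
        random.shuffle(window)
        idx[s:s + shuffle_size] = window
    # sort within windows of `sort_size` so neighbors have similar lengths
    for s in range(0, len(idx), sort_size):
        idx[s:s + sort_size] = sorted(idx[s:s + sort_size],
                                      key=lambda i: lengths[i][1])
    batch, tokens = [], 0
    for i in idx:
        utt, n = lengths[i]
        if batch and tokens + n > batch_size:        # emit once the token budget is hit
            yield batch
            batch, tokens = [], 0
        batch.append(utt)
        tokens += n
    if batch:
        yield batch
```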