# Author: 游雁
# 2023-04-21 3cd3473bf7a3b41484baa86d9092248d78e7af39
model: sond
model_conf:
    lsm_weight: 0.0
    length_normalized_loss: true
    max_spk_num: 16
 
# speech encoder
encoder: ecapa_tdnn
encoder_conf:
    # pass by model, equal to feature dim
    # input_size: 80
    pool_size: 20
    stride: 1
speaker_encoder: conv
speaker_encoder_conf:
    input_units: 256
    num_layers: 3
    num_units: 256
    kernel_size: 1
    dropout_rate: 0.0
    position_encoder: null
    out_units: 256
    out_norm: false
    auxiliary_states: false
    tf2torch_tensor_name_prefix_torch: speaker_encoder
    tf2torch_tensor_name_prefix_tf: EAND/speaker_encoder
ci_scorer: dot
ci_scorer_conf: {}
cd_scorer: san
cd_scorer_conf:
    input_size: 512
    output_size: 512
    out_units: 1
    attention_heads: 4
    linear_units: 1024
    num_blocks: 4
    dropout_rate: 0.0
    positional_dropout_rate: 0.0
    attention_dropout_rate: 0.0
    # use string "null" to remove input layer
    input_layer: "null"
    pos_enc_class: null
    normalize_before: true
    tf2torch_tensor_name_prefix_torch: cd_scorer
    tf2torch_tensor_name_prefix_tf: EAND/compute_distance_layer
# post net
decoder: fsmn
decoder_conf:
    in_units: 32
    out_units: 2517
    filter_size: 31
    fsmn_num_layers: 6
    dnn_num_layers: 1
    num_memory_units: 512
    ffn_inner_dim: 512
    dropout_rate: 0.0
    tf2torch_tensor_name_prefix_torch: decoder
    tf2torch_tensor_name_prefix_tf: EAND/post_net
frontend: wav_frontend
frontend_conf:
    fs: 16000
    window: povey
    n_mels: 80
    frame_length: 25
    frame_shift: 10
    filter_length_min: -1
    filter_length_max: -1
    lfr_m: 1
    lfr_n: 1
    dither: 0.0
    snip_edges: false
 
# minibatch related
batch_type: length
# 16s * 16k * 16 samples
batch_bins: 4096000
num_workers: 8
 
# optimization related
accum_grad: 1
grad_clip: 5
max_epoch: 50
val_scheduler_criterion:
    - valid
    - acc
best_model_criterion:
    -   - valid
        - der
        - min
    -   - valid
        - forward_steps
        - max
keep_nbest_models: 10
 
optim: adam
optim_conf:
    lr: 0.001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 10000
 
# without spec aug
specaug: null
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2
 
log_interval: 50
# without normalize (use YAML null, not the string "None", so loaders see a real null)
normalize: null