# This example demonstrates how to write a model configuration file.
# Modify the settings below to suit your own requirements.
 
# To print the register table:
# from funasr.register import tables
# tables.print()
 
# network architecture
model: LLMASRNAR
model_conf:
    lsm_weight: 0.1     # label smoothing weight
    length_normalized_loss: true
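# For intuition: lsm_weight mixes the one-hot target with a uniform
# distribution over the vocabulary, and length_normalized_loss averages the
# loss over target tokens instead of summing it. A minimal sketch of the
# smoothed cross-entropy (illustrative only, not FunASR's exact loss module):
#   import torch.nn.functional as F
#   def smoothed_ce(logits, target, lsm_weight=0.1):  # logits: (N, vocab)
#       logp = F.log_softmax(logits, dim=-1)
#       nll = F.nll_loss(logp, target)                # standard cross-entropy
#       uniform = -logp.mean(dim=-1).mean()           # pull toward uniform
#       return (1 - lsm_weight) * nll + lsm_weight * uniform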
 
# encoder
encoder: Paraformer
encoder_conf:
    hub: funasr
    init_param_path: "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    freeze: false
 
llm: Vicuna
llm_conf:
    hub: hf
    init_param_path: "/nfs/maziyang.mzy/models/vicuna-7b-v1.5"
    freeze: true
 
adaptor: linear
adaptor_conf:
    downsample_rate: 1
    llm_dim: 4096
    encoder_dim: 2048
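# For intuition, a linear adaptor of this shape stacks downsample_rate
# consecutive encoder frames and projects them into the LLM embedding space.
# A minimal PyTorch sketch (an illustrative assumption, not FunASR's module):
#   import torch.nn as nn
#   class LinearAdaptor(nn.Module):
#       def __init__(self, encoder_dim=2048, llm_dim=4096, downsample_rate=1):
#           super().__init__()
#           self.k = downsample_rate
#           self.proj = nn.Linear(encoder_dim * downsample_rate, llm_dim)
#       def forward(self, x):                 # x: (batch, frames, encoder_dim)
#           b, t, d = x.shape
#           t = t - t % self.k                # drop frames beyond a full group
#           x = x[:, :t].reshape(b, t // self.k, d * self.k)
#           return self.proj(x)               # (batch, frames // k, llm_dim)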
 
# frontend related
frontend: WavFrontend
frontend_conf:
    fs: 16000
    window: hamming
    n_mels: 80
    frame_length: 25
    frame_shift: 10
    dither: 0.0
    lfr_m: 1
    lfr_n: 1
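# lfr_m / lfr_n control low-frame-rate stacking (lfr_m consecutive frames
# stacked per output frame, with a hop of lfr_n frames); 1 / 1 leaves the
# 10 ms frame rate unchanged.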
 
specaug: SpecAug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2
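# To make the mask parameters concrete: each mask zeroes a band whose width is
# drawn uniformly from the configured range, num_freq_mask / num_time_mask
# times per utterance. A minimal sketch of one mask draw (illustrative only,
# not FunASR's SpecAug implementation):
#   import random
#   def apply_mask(spec, width_range, dim):     # spec: (frames, n_mels) tensor
#       width = random.randint(*width_range)    # inclusive range, e.g. 0..30
#       width = min(width, spec.size(dim))
#       start = random.randint(0, spec.size(dim) - width)
#       out = spec.clone()
#       out.narrow(dim, start, width).zero_()   # zero the masked band
#       return out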
 
train_conf:
    accum_grad: 1
    grad_clip: 5
    max_epoch: 150
    keep_nbest_models: 10
    log_interval: 50
 
optim: adam
optim_conf:
    lr: 0.001
    weight_decay: 0.000001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 35000
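# warmuplr follows the inverse-square-root ("Noam") schedule: the learning
# rate ramps up linearly for warmup_steps, then decays as step**-0.5.
# Assuming it matches ESPnet's WarmupLR, the rate at a given step (>= 1) is:
#   def warmup_lr(step, base_lr=0.001, warmup_steps=35000):
#       return base_lr * warmup_steps**0.5 * min(step**-0.5, step * warmup_steps**-1.5)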
 
dataset: AudioLLMDataset
dataset_conf:
    index_ds: IndexDSJsonl
    batch_sampler: RankFullLocalShuffleBatchSampler
    batch_type: example # "example" or "length"
    batch_size: 4 # if batch_type is "example", batch_size is the number of samples; if "length", it is the per-batch token budget (source_token_len + target_token_len)
    max_token_length: 2048 # drop samples whose source_token_len + target_token_len exceeds this value
    buffer_size: 500
    shuffle: true
    num_workers: 4
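# IndexDSJsonl reads a jsonl index with one sample per line. In recent FunASR
# versions (e.g. the output of the scp2jsonl tool) an entry looks roughly like
# the following; the exact fields may differ across versions:
#   {"key": "utt1", "source": "/data/utt1.wav", "source_len": 998,
#    "target": "transcript text", "target_len": 14}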
 
tokenizer: HuggingfaceTokenizer
tokenizer_conf:
    unk_symbol: <unk>
    init_param_path: null
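
# To sanity-check this file before training (the filename "config.yaml" is
# just a placeholder for wherever you save it):
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("config.yaml")
#   print(cfg.model, cfg.llm, cfg.dataset_conf.batch_size)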