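# Language model training configuration (Adam optimizer).
# Key names follow the ESPnet2 lm-training schema (assumed from the lm_conf /
# best_model_criterion keys); lm_conf is passed to the LM architecture,
# here a 4-layer model with 2048 units per layer.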
lm_conf:
    nlayers: 4
    unit: 2048
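# optim / optim_conf select the optimizer and its constructor kwargs;
# lr 0.001 is the usual Adam default.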
optim: adam
optim_conf:
    lr: 0.001
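# Batching and stopping: "folded" batching shrinks the effective batch size
# for long sequences; training stops early after `patience` epochs without
# improvement on the validation criterion below.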
batch_type: folded
batch_size: 400   # base batch size for LM training
max_epoch: 20     # can be reduced when the training data is large
patience: 3

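# Model selection: each criterion is a (dataset, metric, direction) triple;
# keep_nbest_models: 1 retains only the checkpoint with minimum valid loss.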
best_model_criterion:
-   - valid
    - loss
    - min
keep_nbest_models: 1
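
# Typically passed to an ESPnet2 recipe via its lm config option, e.g.
# (illustrative invocation, assuming the standard asr.sh template):
#   ./asr.sh ... --lm_config conf/train_lm_adam.yaml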