{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 65,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 56.69625199712202,
"learning_rate": 7.142857142857143e-06,
"loss": 10.6721,
"step": 1
},
{
"epoch": 0.15384615384615385,
"grad_norm": 55.20314035746543,
"learning_rate": 1.4285714285714285e-05,
"loss": 10.847,
"step": 2
},
{
"epoch": 0.23076923076923078,
"grad_norm": 62.29911555217105,
"learning_rate": 2.1428571428571428e-05,
"loss": 10.0456,
"step": 3
},
{
"epoch": 0.3076923076923077,
"grad_norm": 60.40866589945923,
"learning_rate": 2.857142857142857e-05,
"loss": 3.7411,
"step": 4
},
{
"epoch": 0.38461538461538464,
"grad_norm": 5.296024443977825,
"learning_rate": 3.571428571428572e-05,
"loss": 1.3917,
"step": 5
},
{
"epoch": 0.46153846153846156,
"grad_norm": 11.63499005319304,
"learning_rate": 4.2857142857142856e-05,
"loss": 1.1571,
"step": 6
},
{
"epoch": 0.5384615384615384,
"grad_norm": 2.310497639622062,
"learning_rate": 5e-05,
"loss": 1.0452,
"step": 7
},
{
"epoch": 0.6153846153846154,
"grad_norm": 1.6470093668171653,
"learning_rate": 4.913793103448276e-05,
"loss": 1.0123,
"step": 8
},
{
"epoch": 0.6923076923076923,
"grad_norm": 2.026871155602432,
"learning_rate": 4.827586206896552e-05,
"loss": 0.8787,
"step": 9
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.36441223391375,
"learning_rate": 4.741379310344828e-05,
"loss": 0.8673,
"step": 10
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.64152552079298,
"learning_rate": 4.655172413793104e-05,
"loss": 0.7986,
"step": 11
},
{
"epoch": 0.9230769230769231,
"grad_norm": 1.084291869382038,
"learning_rate": 4.5689655172413794e-05,
"loss": 0.7852,
"step": 12
},
{
"epoch": 1.0,
"grad_norm": 0.8244322351661075,
"learning_rate": 4.482758620689655e-05,
"loss": 0.7835,
"step": 13
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.8467579577588499,
"learning_rate": 4.396551724137931e-05,
"loss": 0.7841,
"step": 14
},
{
"epoch": 1.1538461538461537,
"grad_norm": 1.0394065154633032,
"learning_rate": 4.3103448275862066e-05,
"loss": 0.6965,
"step": 15
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.6303665611617261,
"learning_rate": 4.224137931034483e-05,
"loss": 0.692,
"step": 16
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.6650023019130861,
"learning_rate": 4.1379310344827587e-05,
"loss": 0.7196,
"step": 17
},
{
"epoch": 1.3846153846153846,
"grad_norm": 0.5849872999722573,
"learning_rate": 4.0517241379310344e-05,
"loss": 0.6763,
"step": 18
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.6568623192873284,
"learning_rate": 3.965517241379311e-05,
"loss": 0.6003,
"step": 19
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.6188950192790953,
"learning_rate": 3.8793103448275865e-05,
"loss": 0.698,
"step": 20
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.43937943678839847,
"learning_rate": 3.793103448275862e-05,
"loss": 0.587,
"step": 21
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.46861722120782073,
"learning_rate": 3.7068965517241385e-05,
"loss": 0.6261,
"step": 22
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.5938781657836611,
"learning_rate": 3.620689655172414e-05,
"loss": 0.7064,
"step": 23
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.40181396579561623,
"learning_rate": 3.53448275862069e-05,
"loss": 0.5901,
"step": 24
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.41241038369811467,
"learning_rate": 3.4482758620689657e-05,
"loss": 0.6272,
"step": 25
},
{
"epoch": 2.0,
"grad_norm": 0.5098875911597754,
"learning_rate": 3.3620689655172414e-05,
"loss": 0.6515,
"step": 26
},
{
"epoch": 2.076923076923077,
"grad_norm": 0.4339068791945304,
"learning_rate": 3.275862068965517e-05,
"loss": 0.5543,
"step": 27
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.5054494096764832,
"learning_rate": 3.1896551724137935e-05,
"loss": 0.5018,
"step": 28
},
{
"epoch": 2.230769230769231,
"grad_norm": 0.4238540025684798,
"learning_rate": 3.103448275862069e-05,
"loss": 0.6447,
"step": 29
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.338738261804734,
"learning_rate": 3.017241379310345e-05,
"loss": 0.5072,
"step": 30
},
{
"epoch": 2.3846153846153846,
"grad_norm": 0.38342740921195684,
"learning_rate": 2.9310344827586206e-05,
"loss": 0.5681,
"step": 31
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.36692692741060295,
"learning_rate": 2.844827586206897e-05,
"loss": 0.5098,
"step": 32
},
{
"epoch": 2.5384615384615383,
"grad_norm": 0.3649135699524806,
"learning_rate": 2.7586206896551727e-05,
"loss": 0.5079,
"step": 33
},
{
"epoch": 2.6153846153846154,
"grad_norm": 0.3761181644174631,
"learning_rate": 2.672413793103448e-05,
"loss": 0.495,
"step": 34
},
{
"epoch": 2.6923076923076925,
"grad_norm": 0.3417797037871458,
"learning_rate": 2.5862068965517244e-05,
"loss": 0.4813,
"step": 35
},
{
"epoch": 2.769230769230769,
"grad_norm": 0.40945158138753057,
"learning_rate": 2.5e-05,
"loss": 0.562,
"step": 36
},
{
"epoch": 2.8461538461538463,
"grad_norm": 0.3094047547298436,
"learning_rate": 2.413793103448276e-05,
"loss": 0.5303,
"step": 37
},
{
"epoch": 2.9230769230769234,
"grad_norm": 0.4296909197321008,
"learning_rate": 2.327586206896552e-05,
"loss": 0.5714,
"step": 38
},
{
"epoch": 3.0,
"grad_norm": 0.3729941423109703,
"learning_rate": 2.2413793103448276e-05,
"loss": 0.4989,
"step": 39
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.32599584121247344,
"learning_rate": 2.1551724137931033e-05,
"loss": 0.4522,
"step": 40
},
{
"epoch": 3.1538461538461537,
"grad_norm": 0.36372367238403003,
"learning_rate": 2.0689655172413793e-05,
"loss": 0.4778,
"step": 41
},
{
"epoch": 3.230769230769231,
"grad_norm": 0.3972570731654902,
"learning_rate": 1.9827586206896554e-05,
"loss": 0.4447,
"step": 42
},
{
"epoch": 3.3076923076923075,
"grad_norm": 0.4012637330387787,
"learning_rate": 1.896551724137931e-05,
"loss": 0.4127,
"step": 43
},
{
"epoch": 3.3846153846153846,
"grad_norm": 0.3026808512455307,
"learning_rate": 1.810344827586207e-05,
"loss": 0.4089,
"step": 44
},
{
"epoch": 3.4615384615384617,
"grad_norm": 0.3511308932223884,
"learning_rate": 1.7241379310344828e-05,
"loss": 0.4866,
"step": 45
},
{
"epoch": 3.5384615384615383,
"grad_norm": 0.44325984348261416,
"learning_rate": 1.6379310344827585e-05,
"loss": 0.4831,
"step": 46
},
{
"epoch": 3.6153846153846154,
"grad_norm": 0.3193474058304498,
"learning_rate": 1.5517241379310346e-05,
"loss": 0.4396,
"step": 47
},
{
"epoch": 3.6923076923076925,
"grad_norm": 0.3366172034999065,
"learning_rate": 1.4655172413793103e-05,
"loss": 0.451,
"step": 48
},
{
"epoch": 3.769230769230769,
"grad_norm": 0.3192724937483484,
"learning_rate": 1.3793103448275863e-05,
"loss": 0.3647,
"step": 49
},
{
"epoch": 3.8461538461538463,
"grad_norm": 0.41141681100708327,
"learning_rate": 1.2931034482758622e-05,
"loss": 0.4031,
"step": 50
},
{
"epoch": 3.9230769230769234,
"grad_norm": 0.32878139231790354,
"learning_rate": 1.206896551724138e-05,
"loss": 0.4331,
"step": 51
},
{
"epoch": 4.0,
"grad_norm": 0.2514182705601677,
"learning_rate": 1.1206896551724138e-05,
"loss": 0.4041,
"step": 52
},
{
"epoch": 4.076923076923077,
"grad_norm": 0.30360346561484663,
"learning_rate": 1.0344827586206897e-05,
"loss": 0.4296,
"step": 53
},
{
"epoch": 4.153846153846154,
"grad_norm": 0.37730490335834466,
"learning_rate": 9.482758620689655e-06,
"loss": 0.3607,
"step": 54
},
{
"epoch": 4.230769230769231,
"grad_norm": 0.26735234806360975,
"learning_rate": 8.620689655172414e-06,
"loss": 0.3805,
"step": 55
},
{
"epoch": 4.3076923076923075,
"grad_norm": 0.2666130294253567,
"learning_rate": 7.758620689655173e-06,
"loss": 0.3682,
"step": 56
},
{
"epoch": 4.384615384615385,
"grad_norm": 0.30258601083800113,
"learning_rate": 6.896551724137932e-06,
"loss": 0.3824,
"step": 57
},
{
"epoch": 4.461538461538462,
"grad_norm": 0.3051670708166745,
"learning_rate": 6.03448275862069e-06,
"loss": 0.3327,
"step": 58
},
{
"epoch": 4.538461538461538,
"grad_norm": 0.37192436505550563,
"learning_rate": 5.172413793103448e-06,
"loss": 0.4074,
"step": 59
},
{
"epoch": 4.615384615384615,
"grad_norm": 0.26152453728943026,
"learning_rate": 4.310344827586207e-06,
"loss": 0.3992,
"step": 60
},
{
"epoch": 4.6923076923076925,
"grad_norm": 0.25734721330237487,
"learning_rate": 3.448275862068966e-06,
"loss": 0.4249,
"step": 61
},
{
"epoch": 4.769230769230769,
"grad_norm": 0.23907794412322864,
"learning_rate": 2.586206896551724e-06,
"loss": 0.3945,
"step": 62
},
{
"epoch": 4.846153846153846,
"grad_norm": 0.2511817058634514,
"learning_rate": 1.724137931034483e-06,
"loss": 0.3262,
"step": 63
},
{
"epoch": 4.923076923076923,
"grad_norm": 0.25996184570432856,
"learning_rate": 8.620689655172415e-07,
"loss": 0.3407,
"step": 64
},
{
"epoch": 5.0,
"grad_norm": 0.25930270839584474,
"learning_rate": 0.0,
"loss": 0.3129,
"step": 65
},
{
"epoch": 5.0,
"step": 65,
"total_flos": 6.364145551230894e+17,
"train_loss": 1.0789901279486143,
"train_runtime": 4151.3547,
"train_samples_per_second": 0.247,
"train_steps_per_second": 0.016
}
],
"logging_steps": 1,
"max_steps": 65,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.364145551230894e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}