{
"best_metric": 0.44272685050964355,
"best_model_checkpoint": "data/Llama-31-8B_task-3_120-samples_config-4/checkpoint-214",
"epoch": 46.0,
"eval_steps": 500,
"global_step": 253,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18181818181818182,
"grad_norm": 3.1690733432769775,
"learning_rate": 1.3333333333333336e-07,
"loss": 2.8174,
"step": 1
},
{
"epoch": 0.36363636363636365,
"grad_norm": 2.785377025604248,
"learning_rate": 2.666666666666667e-07,
"loss": 2.5884,
"step": 2
},
{
"epoch": 0.7272727272727273,
"grad_norm": 2.4831278324127197,
"learning_rate": 5.333333333333335e-07,
"loss": 2.4707,
"step": 4
},
{
"epoch": 0.9090909090909091,
"eval_loss": 2.502408742904663,
"eval_runtime": 20.9991,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 5
},
{
"epoch": 1.0909090909090908,
"grad_norm": 2.511209726333618,
"learning_rate": 8.000000000000001e-07,
"loss": 2.3921,
"step": 6
},
{
"epoch": 1.4545454545454546,
"grad_norm": 3.1637580394744873,
"learning_rate": 1.066666666666667e-06,
"loss": 2.5814,
"step": 8
},
{
"epoch": 1.8181818181818183,
"grad_norm": 2.612966775894165,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.3847,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 2.479367971420288,
"eval_runtime": 21.0052,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 11
},
{
"epoch": 2.1818181818181817,
"grad_norm": 2.905508518218994,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.6936,
"step": 12
},
{
"epoch": 2.5454545454545454,
"grad_norm": 2.6060760021209717,
"learning_rate": 1.8666666666666669e-06,
"loss": 2.3757,
"step": 14
},
{
"epoch": 2.909090909090909,
"grad_norm": 3.210188150405884,
"learning_rate": 2.133333333333334e-06,
"loss": 2.5822,
"step": 16
},
{
"epoch": 2.909090909090909,
"eval_loss": 2.4506218433380127,
"eval_runtime": 21.0015,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 16
},
{
"epoch": 3.2727272727272725,
"grad_norm": 3.861948013305664,
"learning_rate": 2.4000000000000003e-06,
"loss": 2.6532,
"step": 18
},
{
"epoch": 3.6363636363636362,
"grad_norm": 2.5222151279449463,
"learning_rate": 2.666666666666667e-06,
"loss": 2.3743,
"step": 20
},
{
"epoch": 4.0,
"grad_norm": 2.5924019813537598,
"learning_rate": 2.9333333333333338e-06,
"loss": 2.2635,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 2.3902969360351562,
"eval_runtime": 21.0176,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 22
},
{
"epoch": 4.363636363636363,
"grad_norm": 2.239548921585083,
"learning_rate": 3.2000000000000003e-06,
"loss": 2.4045,
"step": 24
},
{
"epoch": 4.7272727272727275,
"grad_norm": 2.6937167644500732,
"learning_rate": 3.4666666666666672e-06,
"loss": 2.446,
"step": 26
},
{
"epoch": 4.909090909090909,
"eval_loss": 2.3183953762054443,
"eval_runtime": 21.0027,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 27
},
{
"epoch": 5.090909090909091,
"grad_norm": 2.26949143409729,
"learning_rate": 3.7333333333333337e-06,
"loss": 2.1955,
"step": 28
},
{
"epoch": 5.454545454545454,
"grad_norm": 3.2933804988861084,
"learning_rate": 4.000000000000001e-06,
"loss": 2.3794,
"step": 30
},
{
"epoch": 5.818181818181818,
"grad_norm": 3.081024169921875,
"learning_rate": 4.266666666666668e-06,
"loss": 2.3172,
"step": 32
},
{
"epoch": 6.0,
"eval_loss": 2.19378399848938,
"eval_runtime": 20.9996,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 33
},
{
"epoch": 6.181818181818182,
"grad_norm": 3.226388454437256,
"learning_rate": 4.533333333333334e-06,
"loss": 2.2373,
"step": 34
},
{
"epoch": 6.545454545454545,
"grad_norm": 1.6646192073822021,
"learning_rate": 4.800000000000001e-06,
"loss": 2.0122,
"step": 36
},
{
"epoch": 6.909090909090909,
"grad_norm": 2.573935031890869,
"learning_rate": 5.0666666666666676e-06,
"loss": 2.0582,
"step": 38
},
{
"epoch": 6.909090909090909,
"eval_loss": 2.0559873580932617,
"eval_runtime": 21.0001,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 38
},
{
"epoch": 7.2727272727272725,
"grad_norm": 2.028982639312744,
"learning_rate": 5.333333333333334e-06,
"loss": 1.9739,
"step": 40
},
{
"epoch": 7.636363636363637,
"grad_norm": 2.90993595123291,
"learning_rate": 5.600000000000001e-06,
"loss": 2.0373,
"step": 42
},
{
"epoch": 8.0,
"grad_norm": 2.5906777381896973,
"learning_rate": 5.8666666666666675e-06,
"loss": 1.9038,
"step": 44
},
{
"epoch": 8.0,
"eval_loss": 1.8385127782821655,
"eval_runtime": 20.9958,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 44
},
{
"epoch": 8.363636363636363,
"grad_norm": 2.548511266708374,
"learning_rate": 6.133333333333334e-06,
"loss": 1.7707,
"step": 46
},
{
"epoch": 8.727272727272727,
"grad_norm": 2.2620716094970703,
"learning_rate": 6.4000000000000006e-06,
"loss": 1.7291,
"step": 48
},
{
"epoch": 8.909090909090908,
"eval_loss": 1.625160574913025,
"eval_runtime": 20.9978,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 49
},
{
"epoch": 9.090909090909092,
"grad_norm": 2.2526345252990723,
"learning_rate": 6.666666666666667e-06,
"loss": 1.607,
"step": 50
},
{
"epoch": 9.454545454545455,
"grad_norm": 1.2636234760284424,
"learning_rate": 6.9333333333333344e-06,
"loss": 1.5173,
"step": 52
},
{
"epoch": 9.818181818181818,
"grad_norm": 2.455042600631714,
"learning_rate": 7.2000000000000005e-06,
"loss": 1.3996,
"step": 54
},
{
"epoch": 10.0,
"eval_loss": 1.345643162727356,
"eval_runtime": 21.0044,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 55
},
{
"epoch": 10.181818181818182,
"grad_norm": 2.40154767036438,
"learning_rate": 7.4666666666666675e-06,
"loss": 1.298,
"step": 56
},
{
"epoch": 10.545454545454545,
"grad_norm": 2.0422167778015137,
"learning_rate": 7.733333333333334e-06,
"loss": 1.2468,
"step": 58
},
{
"epoch": 10.909090909090908,
"grad_norm": 2.330064058303833,
"learning_rate": 8.000000000000001e-06,
"loss": 1.1127,
"step": 60
},
{
"epoch": 10.909090909090908,
"eval_loss": 1.1189473867416382,
"eval_runtime": 21.0042,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 60
},
{
"epoch": 11.272727272727273,
"grad_norm": 1.6237120628356934,
"learning_rate": 8.266666666666667e-06,
"loss": 1.0346,
"step": 62
},
{
"epoch": 11.636363636363637,
"grad_norm": 1.9251773357391357,
"learning_rate": 8.533333333333335e-06,
"loss": 0.9213,
"step": 64
},
{
"epoch": 12.0,
"grad_norm": 1.3815380334854126,
"learning_rate": 8.8e-06,
"loss": 0.8648,
"step": 66
},
{
"epoch": 12.0,
"eval_loss": 0.8626702427864075,
"eval_runtime": 21.003,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 66
},
{
"epoch": 12.363636363636363,
"grad_norm": 1.8721394538879395,
"learning_rate": 9.066666666666667e-06,
"loss": 0.6295,
"step": 68
},
{
"epoch": 12.727272727272727,
"grad_norm": 1.1077913045883179,
"learning_rate": 9.333333333333334e-06,
"loss": 0.8247,
"step": 70
},
{
"epoch": 12.909090909090908,
"eval_loss": 0.7228104472160339,
"eval_runtime": 20.9976,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 71
},
{
"epoch": 13.090909090909092,
"grad_norm": 0.9280027151107788,
"learning_rate": 9.600000000000001e-06,
"loss": 0.6643,
"step": 72
},
{
"epoch": 13.454545454545455,
"grad_norm": 0.8919649720191956,
"learning_rate": 9.866666666666668e-06,
"loss": 0.5425,
"step": 74
},
{
"epoch": 13.818181818181818,
"grad_norm": 0.8291063904762268,
"learning_rate": 9.999945845889795e-06,
"loss": 0.5681,
"step": 76
},
{
"epoch": 14.0,
"eval_loss": 0.6452742218971252,
"eval_runtime": 21.0019,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 77
},
{
"epoch": 14.181818181818182,
"grad_norm": 0.8017318844795227,
"learning_rate": 9.999512620046523e-06,
"loss": 0.6102,
"step": 78
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.48972490429878235,
"learning_rate": 9.99864620589731e-06,
"loss": 0.5325,
"step": 80
},
{
"epoch": 14.909090909090908,
"grad_norm": 0.6467877626419067,
"learning_rate": 9.99734667851357e-06,
"loss": 0.4968,
"step": 82
},
{
"epoch": 14.909090909090908,
"eval_loss": 0.6019716858863831,
"eval_runtime": 21.0041,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 82
},
{
"epoch": 15.272727272727273,
"grad_norm": 0.4685284495353699,
"learning_rate": 9.995614150494293e-06,
"loss": 0.4665,
"step": 84
},
{
"epoch": 15.636363636363637,
"grad_norm": 0.6542117595672607,
"learning_rate": 9.993448771956285e-06,
"loss": 0.4592,
"step": 86
},
{
"epoch": 16.0,
"grad_norm": 0.7055411338806152,
"learning_rate": 9.99085073052117e-06,
"loss": 0.589,
"step": 88
},
{
"epoch": 16.0,
"eval_loss": 0.5632085800170898,
"eval_runtime": 21.0058,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 88
},
{
"epoch": 16.363636363636363,
"grad_norm": 0.5362458825111389,
"learning_rate": 9.987820251299121e-06,
"loss": 0.5109,
"step": 90
},
{
"epoch": 16.727272727272727,
"grad_norm": 0.3406112492084503,
"learning_rate": 9.984357596869369e-06,
"loss": 0.3902,
"step": 92
},
{
"epoch": 16.90909090909091,
"eval_loss": 0.5393884778022766,
"eval_runtime": 21.0005,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 93
},
{
"epoch": 17.09090909090909,
"grad_norm": 0.5570920705795288,
"learning_rate": 9.980463067257437e-06,
"loss": 0.5325,
"step": 94
},
{
"epoch": 17.454545454545453,
"grad_norm": 0.40238556265830994,
"learning_rate": 9.976136999909156e-06,
"loss": 0.4039,
"step": 96
},
{
"epoch": 17.818181818181817,
"grad_norm": 0.6511011719703674,
"learning_rate": 9.971379769661422e-06,
"loss": 0.4795,
"step": 98
},
{
"epoch": 18.0,
"eval_loss": 0.5253307819366455,
"eval_runtime": 21.0047,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 99
},
{
"epoch": 18.181818181818183,
"grad_norm": 0.4080711305141449,
"learning_rate": 9.966191788709716e-06,
"loss": 0.4767,
"step": 100
},
{
"epoch": 18.545454545454547,
"grad_norm": 0.35794946551322937,
"learning_rate": 9.960573506572391e-06,
"loss": 0.4024,
"step": 102
},
{
"epoch": 18.90909090909091,
"grad_norm": 0.2140028029680252,
"learning_rate": 9.95452541005172e-06,
"loss": 0.3937,
"step": 104
},
{
"epoch": 18.90909090909091,
"eval_loss": 0.5187709927558899,
"eval_runtime": 20.9985,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 104
},
{
"epoch": 19.272727272727273,
"grad_norm": 0.26322704553604126,
"learning_rate": 9.948048023191728e-06,
"loss": 0.3845,
"step": 106
},
{
"epoch": 19.636363636363637,
"grad_norm": 0.29932188987731934,
"learning_rate": 9.941141907232766e-06,
"loss": 0.5589,
"step": 108
},
{
"epoch": 20.0,
"grad_norm": 0.3077118396759033,
"learning_rate": 9.933807660562898e-06,
"loss": 0.3482,
"step": 110
},
{
"epoch": 20.0,
"eval_loss": 0.5148636698722839,
"eval_runtime": 21.0007,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 110
},
{
"epoch": 20.363636363636363,
"grad_norm": 0.2408452033996582,
"learning_rate": 9.926045918666045e-06,
"loss": 0.3168,
"step": 112
},
{
"epoch": 20.727272727272727,
"grad_norm": 0.24809201061725616,
"learning_rate": 9.91785735406693e-06,
"loss": 0.4633,
"step": 114
},
{
"epoch": 20.90909090909091,
"eval_loss": 0.5076439380645752,
"eval_runtime": 21.008,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 115
},
{
"epoch": 21.09090909090909,
"grad_norm": 0.4201977550983429,
"learning_rate": 9.909242676272797e-06,
"loss": 0.5529,
"step": 116
},
{
"epoch": 21.454545454545453,
"grad_norm": 0.30744773149490356,
"learning_rate": 9.90020263171194e-06,
"loss": 0.3469,
"step": 118
},
{
"epoch": 21.818181818181817,
"grad_norm": 0.2413516491651535,
"learning_rate": 9.890738003669029e-06,
"loss": 0.4324,
"step": 120
},
{
"epoch": 22.0,
"eval_loss": 0.507264256477356,
"eval_runtime": 20.9993,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 121
},
{
"epoch": 22.181818181818183,
"grad_norm": 0.291746586561203,
"learning_rate": 9.880849612217238e-06,
"loss": 0.325,
"step": 122
},
{
"epoch": 22.545454545454547,
"grad_norm": 0.22690318524837494,
"learning_rate": 9.870538314147194e-06,
"loss": 0.3262,
"step": 124
},
{
"epoch": 22.90909090909091,
"grad_norm": 0.28505831956863403,
"learning_rate": 9.859805002892733e-06,
"loss": 0.5268,
"step": 126
},
{
"epoch": 22.90909090909091,
"eval_loss": 0.5014194846153259,
"eval_runtime": 21.0084,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 126
},
{
"epoch": 23.272727272727273,
"grad_norm": 0.33074063062667847,
"learning_rate": 9.84865060845349e-06,
"loss": 0.4705,
"step": 128
},
{
"epoch": 23.636363636363637,
"grad_norm": 0.24479380249977112,
"learning_rate": 9.83707609731432e-06,
"loss": 0.3475,
"step": 130
},
{
"epoch": 24.0,
"grad_norm": 0.2692837715148926,
"learning_rate": 9.825082472361558e-06,
"loss": 0.3829,
"step": 132
},
{
"epoch": 24.0,
"eval_loss": 0.49462565779685974,
"eval_runtime": 21.0006,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 132
},
{
"epoch": 24.363636363636363,
"grad_norm": 0.2481246441602707,
"learning_rate": 9.812670772796113e-06,
"loss": 0.3245,
"step": 134
},
{
"epoch": 24.727272727272727,
"grad_norm": 0.2520200312137604,
"learning_rate": 9.799842074043438e-06,
"loss": 0.3884,
"step": 136
},
{
"epoch": 24.90909090909091,
"eval_loss": 0.4874545633792877,
"eval_runtime": 21.0022,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 137
},
{
"epoch": 25.09090909090909,
"grad_norm": 0.34048882126808167,
"learning_rate": 9.786597487660336e-06,
"loss": 0.5025,
"step": 138
},
{
"epoch": 25.454545454545453,
"grad_norm": 0.2133476287126541,
"learning_rate": 9.77293816123866e-06,
"loss": 0.3012,
"step": 140
},
{
"epoch": 25.818181818181817,
"grad_norm": 0.3172047734260559,
"learning_rate": 9.75886527830587e-06,
"loss": 0.3955,
"step": 142
},
{
"epoch": 26.0,
"eval_loss": 0.4859234094619751,
"eval_runtime": 21.0019,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 143
},
{
"epoch": 26.181818181818183,
"grad_norm": 0.299968957901001,
"learning_rate": 9.744380058222483e-06,
"loss": 0.471,
"step": 144
},
{
"epoch": 26.545454545454547,
"grad_norm": 0.22688376903533936,
"learning_rate": 9.729483756076436e-06,
"loss": 0.361,
"step": 146
},
{
"epoch": 26.90909090909091,
"grad_norm": 0.20205911993980408,
"learning_rate": 9.714177662574316e-06,
"loss": 0.3296,
"step": 148
},
{
"epoch": 26.90909090909091,
"eval_loss": 0.482715368270874,
"eval_runtime": 21.0014,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 148
},
{
"epoch": 27.272727272727273,
"grad_norm": 0.2501104772090912,
"learning_rate": 9.698463103929542e-06,
"loss": 0.3371,
"step": 150
},
{
"epoch": 27.636363636363637,
"grad_norm": 0.2598528265953064,
"learning_rate": 9.682341441747446e-06,
"loss": 0.439,
"step": 152
},
{
"epoch": 28.0,
"grad_norm": 0.25387951731681824,
"learning_rate": 9.665814072907293e-06,
"loss": 0.364,
"step": 154
},
{
"epoch": 28.0,
"eval_loss": 0.4805121123790741,
"eval_runtime": 21.002,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 154
},
{
"epoch": 28.363636363636363,
"grad_norm": 0.2787465751171112,
"learning_rate": 9.648882429441258e-06,
"loss": 0.4797,
"step": 156
},
{
"epoch": 28.727272727272727,
"grad_norm": 0.25384321808815,
"learning_rate": 9.63154797841033e-06,
"loss": 0.3218,
"step": 158
},
{
"epoch": 28.90909090909091,
"eval_loss": 0.4764772951602936,
"eval_runtime": 21.0046,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 159
},
{
"epoch": 29.09090909090909,
"grad_norm": 0.2499808818101883,
"learning_rate": 9.613812221777212e-06,
"loss": 0.302,
"step": 160
},
{
"epoch": 29.454545454545453,
"grad_norm": 0.24313043057918549,
"learning_rate": 9.595676696276173e-06,
"loss": 0.4349,
"step": 162
},
{
"epoch": 29.818181818181817,
"grad_norm": 0.26249364018440247,
"learning_rate": 9.577142973279896e-06,
"loss": 0.2995,
"step": 164
},
{
"epoch": 30.0,
"eval_loss": 0.4727340638637543,
"eval_runtime": 21.0056,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 165
},
{
"epoch": 30.181818181818183,
"grad_norm": 0.3164134621620178,
"learning_rate": 9.55821265866333e-06,
"loss": 0.3373,
"step": 166
},
{
"epoch": 30.545454545454547,
"grad_norm": 0.23215709626674652,
"learning_rate": 9.538887392664544e-06,
"loss": 0.3434,
"step": 168
},
{
"epoch": 30.90909090909091,
"grad_norm": 0.24047435820102692,
"learning_rate": 9.519168849742603e-06,
"loss": 0.3728,
"step": 170
},
{
"epoch": 30.90909090909091,
"eval_loss": 0.4668484032154083,
"eval_runtime": 21.0083,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 170
},
{
"epoch": 31.272727272727273,
"grad_norm": 0.38881421089172363,
"learning_rate": 9.499058738432492e-06,
"loss": 0.4329,
"step": 172
},
{
"epoch": 31.636363636363637,
"grad_norm": 0.38587087392807007,
"learning_rate": 9.478558801197065e-06,
"loss": 0.3596,
"step": 174
},
{
"epoch": 32.0,
"grad_norm": 0.2352934628725052,
"learning_rate": 9.457670814276083e-06,
"loss": 0.2413,
"step": 176
},
{
"epoch": 32.0,
"eval_loss": 0.46528127789497375,
"eval_runtime": 20.9994,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 176
},
{
"epoch": 32.36363636363637,
"grad_norm": 0.32384243607521057,
"learning_rate": 9.436396587532297e-06,
"loss": 0.3098,
"step": 178
},
{
"epoch": 32.72727272727273,
"grad_norm": 0.3551454544067383,
"learning_rate": 9.414737964294636e-06,
"loss": 0.4141,
"step": 180
},
{
"epoch": 32.90909090909091,
"eval_loss": 0.46264728903770447,
"eval_runtime": 21.0065,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 181
},
{
"epoch": 33.09090909090909,
"grad_norm": 0.365192174911499,
"learning_rate": 9.392696821198488e-06,
"loss": 0.2879,
"step": 182
},
{
"epoch": 33.45454545454545,
"grad_norm": 0.38976219296455383,
"learning_rate": 9.370275068023097e-06,
"loss": 0.3467,
"step": 184
},
{
"epoch": 33.81818181818182,
"grad_norm": 0.3077632486820221,
"learning_rate": 9.347474647526095e-06,
"loss": 0.3693,
"step": 186
},
{
"epoch": 34.0,
"eval_loss": 0.4562326967716217,
"eval_runtime": 21.0019,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 187
},
{
"epoch": 34.18181818181818,
"grad_norm": 0.209530308842659,
"learning_rate": 9.324297535275156e-06,
"loss": 0.1788,
"step": 188
},
{
"epoch": 34.54545454545455,
"grad_norm": 0.36394768953323364,
"learning_rate": 9.30074573947683e-06,
"loss": 0.3143,
"step": 190
},
{
"epoch": 34.90909090909091,
"grad_norm": 0.41356202960014343,
"learning_rate": 9.276821300802535e-06,
"loss": 0.3666,
"step": 192
},
{
"epoch": 34.90909090909091,
"eval_loss": 0.45260027050971985,
"eval_runtime": 21.0113,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 192
},
{
"epoch": 35.27272727272727,
"grad_norm": 0.42078325152397156,
"learning_rate": 9.25252629221175e-06,
"loss": 0.468,
"step": 194
},
{
"epoch": 35.63636363636363,
"grad_norm": 0.30819612741470337,
"learning_rate": 9.227862818772392e-06,
"loss": 0.1892,
"step": 196
},
{
"epoch": 36.0,
"grad_norm": 0.3855637013912201,
"learning_rate": 9.202833017478421e-06,
"loss": 0.3605,
"step": 198
},
{
"epoch": 36.0,
"eval_loss": 0.4506441354751587,
"eval_runtime": 20.9992,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 198
},
{
"epoch": 36.36363636363637,
"grad_norm": 0.39254796504974365,
"learning_rate": 9.177439057064684e-06,
"loss": 0.3432,
"step": 200
},
{
"epoch": 36.72727272727273,
"grad_norm": 0.3540774881839752,
"learning_rate": 9.151683137818989e-06,
"loss": 0.2923,
"step": 202
},
{
"epoch": 36.90909090909091,
"eval_loss": 0.44742465019226074,
"eval_runtime": 21.0039,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 203
},
{
"epoch": 37.09090909090909,
"grad_norm": 0.4445912539958954,
"learning_rate": 9.125567491391476e-06,
"loss": 0.3135,
"step": 204
},
{
"epoch": 37.45454545454545,
"grad_norm": 0.39482685923576355,
"learning_rate": 9.099094380601244e-06,
"loss": 0.2407,
"step": 206
},
{
"epoch": 37.81818181818182,
"grad_norm": 0.3445771634578705,
"learning_rate": 9.072266099240286e-06,
"loss": 0.3422,
"step": 208
},
{
"epoch": 38.0,
"eval_loss": 0.4443068504333496,
"eval_runtime": 21.0039,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 209
},
{
"epoch": 38.18181818181818,
"grad_norm": 0.42600157856941223,
"learning_rate": 9.045084971874738e-06,
"loss": 0.2673,
"step": 210
},
{
"epoch": 38.54545454545455,
"grad_norm": 0.4967671036720276,
"learning_rate": 9.017553353643479e-06,
"loss": 0.3177,
"step": 212
},
{
"epoch": 38.90909090909091,
"grad_norm": 0.39350730180740356,
"learning_rate": 8.989673630054044e-06,
"loss": 0.3489,
"step": 214
},
{
"epoch": 38.90909090909091,
"eval_loss": 0.44272685050964355,
"eval_runtime": 21.0007,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 214
},
{
"epoch": 39.27272727272727,
"grad_norm": 0.27645283937454224,
"learning_rate": 8.961448216775955e-06,
"loss": 0.161,
"step": 216
},
{
"epoch": 39.63636363636363,
"grad_norm": 0.615763783454895,
"learning_rate": 8.932879559431392e-06,
"loss": 0.2562,
"step": 218
},
{
"epoch": 40.0,
"grad_norm": 0.491910845041275,
"learning_rate": 8.903970133383297e-06,
"loss": 0.3737,
"step": 220
},
{
"epoch": 40.0,
"eval_loss": 0.443479984998703,
"eval_runtime": 21.0049,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 220
},
{
"epoch": 40.36363636363637,
"grad_norm": 0.47909313440322876,
"learning_rate": 8.874722443520898e-06,
"loss": 0.2508,
"step": 222
},
{
"epoch": 40.72727272727273,
"grad_norm": 0.5516977906227112,
"learning_rate": 8.845139024042664e-06,
"loss": 0.2804,
"step": 224
},
{
"epoch": 40.90909090909091,
"eval_loss": 0.443651407957077,
"eval_runtime": 21.007,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 225
},
{
"epoch": 41.09090909090909,
"grad_norm": 0.5438214540481567,
"learning_rate": 8.815222438236726e-06,
"loss": 0.3221,
"step": 226
},
{
"epoch": 41.45454545454545,
"grad_norm": 0.49092897772789,
"learning_rate": 8.784975278258783e-06,
"loss": 0.2601,
"step": 228
},
{
"epoch": 41.81818181818182,
"grad_norm": 0.5809181332588196,
"learning_rate": 8.754400164907496e-06,
"loss": 0.3212,
"step": 230
},
{
"epoch": 42.0,
"eval_loss": 0.44689202308654785,
"eval_runtime": 21.001,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 231
},
{
"epoch": 42.18181818181818,
"grad_norm": 0.5147470831871033,
"learning_rate": 8.723499747397415e-06,
"loss": 0.1686,
"step": 232
},
{
"epoch": 42.54545454545455,
"grad_norm": 0.7372314929962158,
"learning_rate": 8.692276703129421e-06,
"loss": 0.2733,
"step": 234
},
{
"epoch": 42.90909090909091,
"grad_norm": 0.4814988374710083,
"learning_rate": 8.660733737458751e-06,
"loss": 0.2322,
"step": 236
},
{
"epoch": 42.90909090909091,
"eval_loss": 0.4480229318141937,
"eval_runtime": 21.0124,
"eval_samples_per_second": 1.142,
"eval_steps_per_second": 1.142,
"step": 236
},
{
"epoch": 43.27272727272727,
"grad_norm": 0.7270358800888062,
"learning_rate": 8.628873583460593e-06,
"loss": 0.2312,
"step": 238
},
{
"epoch": 43.63636363636363,
"grad_norm": 0.6802980303764343,
"learning_rate": 8.596699001693257e-06,
"loss": 0.2551,
"step": 240
},
{
"epoch": 44.0,
"grad_norm": 0.6301150321960449,
"learning_rate": 8.564212779959003e-06,
"loss": 0.2569,
"step": 242
},
{
"epoch": 44.0,
"eval_loss": 0.4507286548614502,
"eval_runtime": 21.0008,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 242
},
{
"epoch": 44.36363636363637,
"grad_norm": 0.6666437983512878,
"learning_rate": 8.531417733062476e-06,
"loss": 0.2299,
"step": 244
},
{
"epoch": 44.72727272727273,
"grad_norm": 0.6867101788520813,
"learning_rate": 8.498316702566828e-06,
"loss": 0.2501,
"step": 246
},
{
"epoch": 44.90909090909091,
"eval_loss": 0.4543609619140625,
"eval_runtime": 21.005,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 247
},
{
"epoch": 45.09090909090909,
"grad_norm": 0.571156919002533,
"learning_rate": 8.464912556547486e-06,
"loss": 0.2035,
"step": 248
},
{
"epoch": 45.45454545454545,
"grad_norm": 0.761677086353302,
"learning_rate": 8.43120818934367e-06,
"loss": 0.1717,
"step": 250
},
{
"epoch": 45.81818181818182,
"grad_norm": 0.6928288340568542,
"learning_rate": 8.397206521307584e-06,
"loss": 0.2247,
"step": 252
},
{
"epoch": 46.0,
"eval_loss": 0.4641251266002655,
"eval_runtime": 20.9989,
"eval_samples_per_second": 1.143,
"eval_steps_per_second": 1.143,
"step": 253
},
{
"epoch": 46.0,
"step": 253,
"total_flos": 3.15960483767124e+17,
"train_loss": 0.7967259936888699,
"train_runtime": 11647.481,
"train_samples_per_second": 1.133,
"train_steps_per_second": 0.064
}
],
"logging_steps": 2,
"max_steps": 750,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.15960483767124e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}