{
"best_global_step": 6000,
"best_metric": 4.305679076455633,
"best_model_checkpoint": "outputs/bert-tiny-stage2-sbert/checkpoints/checkpoint-6000",
"epoch": 1.4191106906338695,
"eval_steps": 2000,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011825922421948912,
"grad_norm": 37.789451599121094,
"learning_rate": 2.3173327027666118e-07,
"loss": 18.0314,
"step": 50
},
{
"epoch": 0.023651844843897825,
"grad_norm": 43.297508239746094,
"learning_rate": 4.6819579096713174e-07,
"loss": 17.7147,
"step": 100
},
{
"epoch": 0.035477767265846734,
"grad_norm": 36.56124496459961,
"learning_rate": 7.046583116576024e-07,
"loss": 17.0007,
"step": 150
},
{
"epoch": 0.04730368968779565,
"grad_norm": 34.428916931152344,
"learning_rate": 9.411208323480729e-07,
"loss": 16.0667,
"step": 200
},
{
"epoch": 0.05912961210974456,
"grad_norm": 33.16645812988281,
"learning_rate": 1.1775833530385434e-06,
"loss": 14.9131,
"step": 250
},
{
"epoch": 0.07095553453169347,
"grad_norm": 29.064250946044922,
"learning_rate": 1.4140458737290142e-06,
"loss": 13.9449,
"step": 300
},
{
"epoch": 0.08278145695364239,
"grad_norm": 31.42921257019043,
"learning_rate": 1.6505083944194847e-06,
"loss": 12.7957,
"step": 350
},
{
"epoch": 0.0946073793755913,
"grad_norm": 33.341365814208984,
"learning_rate": 1.8869709151099552e-06,
"loss": 11.7288,
"step": 400
},
{
"epoch": 0.10643330179754021,
"grad_norm": 36.34325408935547,
"learning_rate": 2.123433435800426e-06,
"loss": 10.6945,
"step": 450
},
{
"epoch": 0.11825922421948912,
"grad_norm": 39.06941604614258,
"learning_rate": 2.3598959564908965e-06,
"loss": 9.4743,
"step": 500
},
{
"epoch": 0.13008514664143803,
"grad_norm": 32.969947814941406,
"learning_rate": 2.596358477181367e-06,
"loss": 8.6215,
"step": 550
},
{
"epoch": 0.14191106906338694,
"grad_norm": 33.9212760925293,
"learning_rate": 2.8328209978718375e-06,
"loss": 7.7279,
"step": 600
},
{
"epoch": 0.15373699148533584,
"grad_norm": 32.65876007080078,
"learning_rate": 3.069283518562308e-06,
"loss": 7.1892,
"step": 650
},
{
"epoch": 0.16556291390728478,
"grad_norm": 29.210859298706055,
"learning_rate": 3.3057460392527786e-06,
"loss": 6.9682,
"step": 700
},
{
"epoch": 0.1773888363292337,
"grad_norm": 29.231189727783203,
"learning_rate": 3.5422085599432495e-06,
"loss": 6.4781,
"step": 750
},
{
"epoch": 0.1892147587511826,
"grad_norm": 28.949066162109375,
"learning_rate": 3.77867108063372e-06,
"loss": 6.1271,
"step": 800
},
{
"epoch": 0.2010406811731315,
"grad_norm": 29.826133728027344,
"learning_rate": 4.01513360132419e-06,
"loss": 6.1199,
"step": 850
},
{
"epoch": 0.21286660359508042,
"grad_norm": 27.585041046142578,
"learning_rate": 4.2515961220146615e-06,
"loss": 5.9544,
"step": 900
},
{
"epoch": 0.22469252601702933,
"grad_norm": 28.10279655456543,
"learning_rate": 4.488058642705131e-06,
"loss": 5.8145,
"step": 950
},
{
"epoch": 0.23651844843897823,
"grad_norm": 26.567943572998047,
"learning_rate": 4.7245211633956025e-06,
"loss": 5.5599,
"step": 1000
},
{
"epoch": 0.24834437086092714,
"grad_norm": 24.42616081237793,
"learning_rate": 4.960983684086072e-06,
"loss": 5.2344,
"step": 1050
},
{
"epoch": 0.26017029328287605,
"grad_norm": 25.857810974121094,
"learning_rate": 5.197446204776543e-06,
"loss": 5.3013,
"step": 1100
},
{
"epoch": 0.27199621570482496,
"grad_norm": 26.047733306884766,
"learning_rate": 5.433908725467014e-06,
"loss": 5.0562,
"step": 1150
},
{
"epoch": 0.28382213812677387,
"grad_norm": 26.875659942626953,
"learning_rate": 5.670371246157485e-06,
"loss": 4.8728,
"step": 1200
},
{
"epoch": 0.2956480605487228,
"grad_norm": 21.9539737701416,
"learning_rate": 5.906833766847954e-06,
"loss": 4.7826,
"step": 1250
},
{
"epoch": 0.3074739829706717,
"grad_norm": 23.06488609313965,
"learning_rate": 6.143296287538426e-06,
"loss": 4.8806,
"step": 1300
},
{
"epoch": 0.3192999053926206,
"grad_norm": 24.24974250793457,
"learning_rate": 6.379758808228896e-06,
"loss": 4.6464,
"step": 1350
},
{
"epoch": 0.33112582781456956,
"grad_norm": 22.658571243286133,
"learning_rate": 6.616221328919367e-06,
"loss": 4.7046,
"step": 1400
},
{
"epoch": 0.34295175023651847,
"grad_norm": 21.927656173706055,
"learning_rate": 6.852683849609837e-06,
"loss": 4.5188,
"step": 1450
},
{
"epoch": 0.3547776726584674,
"grad_norm": 24.39653778076172,
"learning_rate": 7.089146370300309e-06,
"loss": 4.4968,
"step": 1500
},
{
"epoch": 0.3666035950804163,
"grad_norm": 23.591333389282227,
"learning_rate": 7.325608890990778e-06,
"loss": 4.4387,
"step": 1550
},
{
"epoch": 0.3784295175023652,
"grad_norm": 24.572961807250977,
"learning_rate": 7.562071411681249e-06,
"loss": 4.1702,
"step": 1600
},
{
"epoch": 0.3902554399243141,
"grad_norm": 22.61821174621582,
"learning_rate": 7.79853393237172e-06,
"loss": 4.2147,
"step": 1650
},
{
"epoch": 0.402081362346263,
"grad_norm": 22.490327835083008,
"learning_rate": 8.03499645306219e-06,
"loss": 3.9972,
"step": 1700
},
{
"epoch": 0.4139072847682119,
"grad_norm": 23.695873260498047,
"learning_rate": 8.271458973752661e-06,
"loss": 4.1279,
"step": 1750
},
{
"epoch": 0.42573320719016083,
"grad_norm": 24.085838317871094,
"learning_rate": 8.507921494443131e-06,
"loss": 4.0214,
"step": 1800
},
{
"epoch": 0.43755912961210974,
"grad_norm": 20.78253173828125,
"learning_rate": 8.744384015133602e-06,
"loss": 3.9161,
"step": 1850
},
{
"epoch": 0.44938505203405865,
"grad_norm": 19.800090789794922,
"learning_rate": 8.980846535824072e-06,
"loss": 3.7544,
"step": 1900
},
{
"epoch": 0.46121097445600756,
"grad_norm": 22.900514602661133,
"learning_rate": 9.217309056514543e-06,
"loss": 3.8246,
"step": 1950
},
{
"epoch": 0.47303689687795647,
"grad_norm": 22.419363021850586,
"learning_rate": 9.453771577205015e-06,
"loss": 3.7991,
"step": 2000
},
{
"epoch": 0.47303689687795647,
"eval_runtime": 46.7005,
"eval_samples_per_second": 0.0,
"eval_steps_per_second": 0.0,
"eval_validation_loss": 5.98806651504585,
"step": 2000
},
{
"epoch": 0.4848628192999054,
"grad_norm": 22.308876037597656,
"learning_rate": 9.690234097895484e-06,
"loss": 3.8554,
"step": 2050
},
{
"epoch": 0.4966887417218543,
"grad_norm": 23.8614501953125,
"learning_rate": 9.926696618585954e-06,
"loss": 3.8123,
"step": 2100
},
{
"epoch": 0.5085146641438032,
"grad_norm": 21.00491714477539,
"learning_rate": 1.0163159139276425e-05,
"loss": 3.5525,
"step": 2150
},
{
"epoch": 0.5203405865657521,
"grad_norm": 25.555097579956055,
"learning_rate": 1.0399621659966897e-05,
"loss": 3.5591,
"step": 2200
},
{
"epoch": 0.532166508987701,
"grad_norm": 25.4840087890625,
"learning_rate": 1.0636084180657367e-05,
"loss": 3.6293,
"step": 2250
},
{
"epoch": 0.5439924314096499,
"grad_norm": 21.117971420288086,
"learning_rate": 1.0872546701347836e-05,
"loss": 3.5831,
"step": 2300
},
{
"epoch": 0.5558183538315988,
"grad_norm": 23.38995361328125,
"learning_rate": 1.1109009222038308e-05,
"loss": 3.6007,
"step": 2350
},
{
"epoch": 0.5676442762535477,
"grad_norm": 22.385738372802734,
"learning_rate": 1.1345471742728777e-05,
"loss": 3.4225,
"step": 2400
},
{
"epoch": 0.5794701986754967,
"grad_norm": 21.53306007385254,
"learning_rate": 1.158193426341925e-05,
"loss": 3.4405,
"step": 2450
},
{
"epoch": 0.5912961210974456,
"grad_norm": 22.93678092956543,
"learning_rate": 1.181839678410972e-05,
"loss": 3.4002,
"step": 2500
},
{
"epoch": 0.6031220435193945,
"grad_norm": 20.330045700073242,
"learning_rate": 1.2054859304800191e-05,
"loss": 3.3653,
"step": 2550
},
{
"epoch": 0.6149479659413434,
"grad_norm": 21.98198699951172,
"learning_rate": 1.2291321825490661e-05,
"loss": 3.321,
"step": 2600
},
{
"epoch": 0.6267738883632923,
"grad_norm": 18.49015998840332,
"learning_rate": 1.252778434618113e-05,
"loss": 3.3042,
"step": 2650
},
{
"epoch": 0.6385998107852412,
"grad_norm": 22.69803237915039,
"learning_rate": 1.2764246866871602e-05,
"loss": 3.2117,
"step": 2700
},
{
"epoch": 0.6504257332071902,
"grad_norm": 19.658132553100586,
"learning_rate": 1.3000709387562072e-05,
"loss": 3.3423,
"step": 2750
},
{
"epoch": 0.6622516556291391,
"grad_norm": 20.783931732177734,
"learning_rate": 1.3237171908252545e-05,
"loss": 3.2494,
"step": 2800
},
{
"epoch": 0.674077578051088,
"grad_norm": 17.039609909057617,
"learning_rate": 1.3473634428943014e-05,
"loss": 3.1364,
"step": 2850
},
{
"epoch": 0.6859035004730369,
"grad_norm": 21.787738800048828,
"learning_rate": 1.3710096949633484e-05,
"loss": 3.1836,
"step": 2900
},
{
"epoch": 0.6977294228949859,
"grad_norm": 20.883773803710938,
"learning_rate": 1.3946559470323956e-05,
"loss": 3.1268,
"step": 2950
},
{
"epoch": 0.7095553453169348,
"grad_norm": 17.700597763061523,
"learning_rate": 1.4183021991014425e-05,
"loss": 3.072,
"step": 3000
},
{
"epoch": 0.7213812677388837,
"grad_norm": 20.23262596130371,
"learning_rate": 1.4419484511704895e-05,
"loss": 3.0135,
"step": 3050
},
{
"epoch": 0.7332071901608326,
"grad_norm": 19.417842864990234,
"learning_rate": 1.4655947032395366e-05,
"loss": 3.0607,
"step": 3100
},
{
"epoch": 0.7450331125827815,
"grad_norm": 19.843341827392578,
"learning_rate": 1.4892409553085838e-05,
"loss": 3.0963,
"step": 3150
},
{
"epoch": 0.7568590350047304,
"grad_norm": 20.248523712158203,
"learning_rate": 1.5128872073776309e-05,
"loss": 3.0419,
"step": 3200
},
{
"epoch": 0.7686849574266793,
"grad_norm": 24.61260986328125,
"learning_rate": 1.5365334594466777e-05,
"loss": 2.9891,
"step": 3250
},
{
"epoch": 0.7805108798486282,
"grad_norm": 16.637826919555664,
"learning_rate": 1.560179711515725e-05,
"loss": 2.9384,
"step": 3300
},
{
"epoch": 0.7923368022705771,
"grad_norm": 24.341026306152344,
"learning_rate": 1.583825963584772e-05,
"loss": 2.8918,
"step": 3350
},
{
"epoch": 0.804162724692526,
"grad_norm": 18.246440887451172,
"learning_rate": 1.607472215653819e-05,
"loss": 2.9816,
"step": 3400
},
{
"epoch": 0.8159886471144749,
"grad_norm": 19.296022415161133,
"learning_rate": 1.631118467722866e-05,
"loss": 2.9664,
"step": 3450
},
{
"epoch": 0.8278145695364238,
"grad_norm": 19.331918716430664,
"learning_rate": 1.6547647197919134e-05,
"loss": 2.8969,
"step": 3500
},
{
"epoch": 0.8396404919583728,
"grad_norm": 25.586254119873047,
"learning_rate": 1.6784109718609602e-05,
"loss": 2.9368,
"step": 3550
},
{
"epoch": 0.8514664143803217,
"grad_norm": 19.701223373413086,
"learning_rate": 1.7020572239300073e-05,
"loss": 2.8513,
"step": 3600
},
{
"epoch": 0.8632923368022706,
"grad_norm": 16.68182945251465,
"learning_rate": 1.7257034759990545e-05,
"loss": 2.9808,
"step": 3650
},
{
"epoch": 0.8751182592242195,
"grad_norm": 19.592416763305664,
"learning_rate": 1.7493497280681013e-05,
"loss": 2.8428,
"step": 3700
},
{
"epoch": 0.8869441816461684,
"grad_norm": 20.324504852294922,
"learning_rate": 1.7729959801371484e-05,
"loss": 2.8775,
"step": 3750
},
{
"epoch": 0.8987701040681173,
"grad_norm": 19.49851417541504,
"learning_rate": 1.7966422322061955e-05,
"loss": 2.739,
"step": 3800
},
{
"epoch": 0.9105960264900662,
"grad_norm": 19.18546485900879,
"learning_rate": 1.8202884842752427e-05,
"loss": 2.8277,
"step": 3850
},
{
"epoch": 0.9224219489120151,
"grad_norm": 23.6113338470459,
"learning_rate": 1.8439347363442898e-05,
"loss": 2.767,
"step": 3900
},
{
"epoch": 0.934247871333964,
"grad_norm": 19.779712677001953,
"learning_rate": 1.8675809884133366e-05,
"loss": 2.794,
"step": 3950
},
{
"epoch": 0.9460737937559129,
"grad_norm": 23.361425399780273,
"learning_rate": 1.8912272404823837e-05,
"loss": 2.7738,
"step": 4000
},
{
"epoch": 0.9460737937559129,
"eval_runtime": 47.0317,
"eval_samples_per_second": 0.0,
"eval_steps_per_second": 0.0,
"eval_validation_loss": 4.773771009103065,
"step": 4000
},
{
"epoch": 0.9578997161778618,
"grad_norm": 18.137535095214844,
"learning_rate": 1.914873492551431e-05,
"loss": 2.8568,
"step": 4050
},
{
"epoch": 0.9697256385998108,
"grad_norm": 18.014116287231445,
"learning_rate": 1.9385197446204777e-05,
"loss": 2.7938,
"step": 4100
},
{
"epoch": 0.9815515610217597,
"grad_norm": 17.168569564819336,
"learning_rate": 1.9621659966895248e-05,
"loss": 2.7272,
"step": 4150
},
{
"epoch": 0.9933774834437086,
"grad_norm": 17.75269889831543,
"learning_rate": 1.985812248758572e-05,
"loss": 2.7079,
"step": 4200
},
{
"epoch": 1.0052034058656576,
"grad_norm": 19.342844009399414,
"learning_rate": 1.9976346756548995e-05,
"loss": 2.6383,
"step": 4250
},
{
"epoch": 1.0170293282876064,
"grad_norm": 17.54117774963379,
"learning_rate": 1.9917213647921473e-05,
"loss": 2.6855,
"step": 4300
},
{
"epoch": 1.0288552507095554,
"grad_norm": 18.412206649780273,
"learning_rate": 1.9858080539293952e-05,
"loss": 2.6568,
"step": 4350
},
{
"epoch": 1.0406811731315042,
"grad_norm": 18.794939041137695,
"learning_rate": 1.979894743066643e-05,
"loss": 2.5981,
"step": 4400
},
{
"epoch": 1.0525070955534532,
"grad_norm": 17.26803970336914,
"learning_rate": 1.973981432203891e-05,
"loss": 2.6987,
"step": 4450
},
{
"epoch": 1.064333017975402,
"grad_norm": 15.831737518310547,
"learning_rate": 1.968068121341139e-05,
"loss": 2.6992,
"step": 4500
},
{
"epoch": 1.076158940397351,
"grad_norm": 16.746700286865234,
"learning_rate": 1.962154810478387e-05,
"loss": 2.5434,
"step": 4550
},
{
"epoch": 1.0879848628192998,
"grad_norm": 18.824857711791992,
"learning_rate": 1.956241499615635e-05,
"loss": 2.5553,
"step": 4600
},
{
"epoch": 1.0998107852412489,
"grad_norm": 16.81246566772461,
"learning_rate": 1.9503281887528828e-05,
"loss": 2.4978,
"step": 4650
},
{
"epoch": 1.1116367076631977,
"grad_norm": 18.369991302490234,
"learning_rate": 1.9444148778901307e-05,
"loss": 2.5679,
"step": 4700
},
{
"epoch": 1.1234626300851467,
"grad_norm": 19.55158805847168,
"learning_rate": 1.938501567027379e-05,
"loss": 2.4768,
"step": 4750
},
{
"epoch": 1.1352885525070955,
"grad_norm": 20.673002243041992,
"learning_rate": 1.9325882561646268e-05,
"loss": 2.5578,
"step": 4800
},
{
"epoch": 1.1471144749290445,
"grad_norm": 17.067432403564453,
"learning_rate": 1.9266749453018747e-05,
"loss": 2.4758,
"step": 4850
},
{
"epoch": 1.1589403973509933,
"grad_norm": 22.328304290771484,
"learning_rate": 1.9207616344391226e-05,
"loss": 2.5352,
"step": 4900
},
{
"epoch": 1.1707663197729423,
"grad_norm": 15.121694564819336,
"learning_rate": 1.9148483235763708e-05,
"loss": 2.5023,
"step": 4950
},
{
"epoch": 1.1825922421948911,
"grad_norm": 15.201376914978027,
"learning_rate": 1.9089350127136187e-05,
"loss": 2.4713,
"step": 5000
},
{
"epoch": 1.1944181646168401,
"grad_norm": 20.54207992553711,
"learning_rate": 1.9030217018508665e-05,
"loss": 2.486,
"step": 5050
},
{
"epoch": 1.206244087038789,
"grad_norm": 16.934635162353516,
"learning_rate": 1.8971083909881144e-05,
"loss": 2.483,
"step": 5100
},
{
"epoch": 1.218070009460738,
"grad_norm": 16.963790893554688,
"learning_rate": 1.8911950801253623e-05,
"loss": 2.4098,
"step": 5150
},
{
"epoch": 1.2298959318826868,
"grad_norm": 16.505352020263672,
"learning_rate": 1.8852817692626102e-05,
"loss": 2.5061,
"step": 5200
},
{
"epoch": 1.2417218543046358,
"grad_norm": 16.634069442749023,
"learning_rate": 1.879368458399858e-05,
"loss": 2.4597,
"step": 5250
},
{
"epoch": 1.2535477767265846,
"grad_norm": 16.373046875,
"learning_rate": 1.8734551475371063e-05,
"loss": 2.4591,
"step": 5300
},
{
"epoch": 1.2653736991485336,
"grad_norm": 21.308876037597656,
"learning_rate": 1.867541836674354e-05,
"loss": 2.3879,
"step": 5350
},
{
"epoch": 1.2771996215704826,
"grad_norm": 20.565275192260742,
"learning_rate": 1.861628525811602e-05,
"loss": 2.4146,
"step": 5400
},
{
"epoch": 1.2890255439924314,
"grad_norm": 15.853353500366211,
"learning_rate": 1.85571521494885e-05,
"loss": 2.3418,
"step": 5450
},
{
"epoch": 1.3008514664143802,
"grad_norm": 13.12362003326416,
"learning_rate": 1.8498019040860978e-05,
"loss": 2.4307,
"step": 5500
},
{
"epoch": 1.3126773888363292,
"grad_norm": 19.059667587280273,
"learning_rate": 1.843888593223346e-05,
"loss": 2.3653,
"step": 5550
},
{
"epoch": 1.3245033112582782,
"grad_norm": 17.448827743530273,
"learning_rate": 1.837975282360594e-05,
"loss": 2.3995,
"step": 5600
},
{
"epoch": 1.336329233680227,
"grad_norm": 18.326887130737305,
"learning_rate": 1.8320619714978418e-05,
"loss": 2.4527,
"step": 5650
},
{
"epoch": 1.3481551561021758,
"grad_norm": 18.03122901916504,
"learning_rate": 1.8261486606350896e-05,
"loss": 2.4547,
"step": 5700
},
{
"epoch": 1.3599810785241249,
"grad_norm": 18.269872665405273,
"learning_rate": 1.820235349772338e-05,
"loss": 2.3695,
"step": 5750
},
{
"epoch": 1.3718070009460739,
"grad_norm": 16.90838623046875,
"learning_rate": 1.8143220389095857e-05,
"loss": 2.3341,
"step": 5800
},
{
"epoch": 1.3836329233680227,
"grad_norm": 18.816362380981445,
"learning_rate": 1.8084087280468336e-05,
"loss": 2.2412,
"step": 5850
},
{
"epoch": 1.3954588457899715,
"grad_norm": 17.30527687072754,
"learning_rate": 1.8024954171840815e-05,
"loss": 2.2695,
"step": 5900
},
{
"epoch": 1.4072847682119205,
"grad_norm": 18.299711227416992,
"learning_rate": 1.7965821063213297e-05,
"loss": 2.2922,
"step": 5950
},
{
"epoch": 1.4191106906338695,
"grad_norm": 18.047449111938477,
"learning_rate": 1.7906687954585773e-05,
"loss": 2.3176,
"step": 6000
},
{
"epoch": 1.4191106906338695,
"eval_runtime": 46.9839,
"eval_samples_per_second": 0.0,
"eval_steps_per_second": 0.0,
"eval_validation_loss": 4.305679076455633,
"step": 6000
}
],
"logging_steps": 50,
"max_steps": 21140,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}