{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.99249530956848,
  "global_step": 1596,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0004999690033339971,
      "loss": 2.3543,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0004998430925516213,
      "loss": 1.1795,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.000499620379108329,
      "loss": 1.1243,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.000499300949295295,
      "loss": 1.1254,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.000498884926876821,
      "loss": 1.0875,
      "step": 50
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0004983724730423824,
      "loss": 1.1023,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0004977637863441759,
      "loss": 1.1516,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0004970591026201884,
      "loss": 1.1248,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0004962586949028218,
      "loss": 1.0591,
      "step": 90
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0004953628733131045,
      "loss": 1.1047,
      "step": 100
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0004943719849405347,
      "loss": 1.0747,
      "step": 110
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0004932864137085986,
      "loss": 1.118,
      "step": 120
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0004921065802260185,
      "loss": 1.089,
      "step": 130
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0004908329416237854,
      "loss": 1.0948,
      "step": 140
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0004894659913780427,
      "loss": 1.0958,
      "step": 150
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00048800625911888653,
      "loss": 1.1008,
      "step": 160
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00048645431042515866,
      "loss": 1.0913,
      "step": 170
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0004848107466053109,
      "loss": 1.0517,
      "step": 180
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00048307620446442546,
      "loss": 1.0617,
      "step": 190
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00048125135605748317,
      "loss": 1.0274,
      "step": 200
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00047933690842897226,
      "loss": 1.0802,
      "step": 210
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0004773336033389421,
      "loss": 1.1106,
      "step": 220
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00047524221697560476,
      "loss": 1.0899,
      "step": 230
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0004730635596545985,
      "loss": 1.1016,
      "step": 240
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0004707984755050274,
      "loss": 1.0725,
      "step": 250
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0004684478421424007,
      "loss": 1.0925,
      "step": 260
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.000466012570328597,
      "loss": 1.0931,
      "step": 270
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0004634936036189862,
      "loss": 1.0841,
      "step": 280
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0004608919179968457,
      "loss": 1.0982,
      "step": 290
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0004582085214952112,
      "loss": 1.0526,
      "step": 300
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00045600367008032127,
      "loss": 1.0635,
      "step": 310
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00045317583503813255,
      "loss": 1.0959,
      "step": 320
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0004502692787422771,
      "loss": 1.064,
      "step": 330
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00044728512734909845,
      "loss": 1.0866,
      "step": 340
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.000444224537079457,
      "loss": 1.0763,
      "step": 350
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.000441088693770748,
      "loss": 1.0991,
      "step": 360
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0004378788124174441,
      "loss": 1.1167,
      "step": 370
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00043459613670034135,
      "loss": 1.0791,
      "step": 380
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0004312419385046902,
      "loss": 1.0787,
      "step": 390
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0004278175174273988,
      "loss": 1.072,
      "step": 400
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0004250283108026474,
      "loss": 1.0384,
      "step": 410
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00042148085004302204,
      "loss": 1.0314,
      "step": 420
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0004178669483727803,
      "loss": 1.0377,
      "step": 430
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00041418800601193364,
      "loss": 1.0471,
      "step": 440
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00041044544838075793,
      "loss": 1.0559,
      "step": 450
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0004066407255475085,
      "loss": 1.0777,
      "step": 460
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.0004027753116665859,
      "loss": 1.0484,
      "step": 470
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0003988507044073687,
      "loss": 1.0516,
      "step": 480
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0003948684243739366,
      "loss": 1.0274,
      "step": 490
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0003908300145159055,
      "loss": 1.0084,
      "step": 500
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0003875599239170917,
      "loss": 1.0323,
      "step": 510
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0003834244376397034,
      "loss": 0.9866,
      "step": 520
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.0003792372555541064,
      "loss": 1.034,
      "step": 530
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.000375,
      "loss": 1.034,
      "step": 540
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0003707143127182402,
      "loss": 1.0133,
      "step": 550
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00036638185421474087,
      "loss": 1.0004,
      "step": 560
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.0003620043031171043,
      "loss": 1.0331,
      "step": 570
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.0003575833555242313,
      "loss": 1.0196,
      "step": 580
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.0003531207243491598,
      "loss": 1.0545,
      "step": 590
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.0003486181386553916,
      "loss": 1.0751,
      "step": 600
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0003440773429869589,
      "loss": 1.033,
      "step": 610
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00034041837707267993,
      "loss": 1.0527,
      "step": 620
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.0003358132465220639,
      "loss": 1.046,
      "step": 630
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00033117486730117093,
      "loss": 1.0271,
      "step": 640
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00032650503656775446,
      "loss": 1.0153,
      "step": 650
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.0003218055636655766,
      "loss": 1.0502,
      "step": 660
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0003170782694233712,
      "loss": 1.0469,
      "step": 670
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00031232498544935634,
      "loss": 1.005,
      "step": 680
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.0003075475534215712,
      "loss": 1.079,
      "step": 690
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0003027478243743106,
      "loss": 1.0188,
      "step": 700
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.0002979276579809346,
      "loss": 1.049,
      "step": 710
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.0002940580647511805,
      "loss": 1.0118,
      "step": 720
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.0002892058223257285,
      "loss": 1.0247,
      "step": 730
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00028433838945460206,
      "loss": 1.0649,
      "step": 740
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.0002794576520432666,
      "loss": 1.0099,
      "step": 750
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00027456550115208267,
      "loss": 1.0518,
      "step": 760
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00026966383226360695,
      "loss": 1.0151,
      "step": 770
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.0002647545445481807,
      "loss": 1.0874,
      "step": 780
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.0002598395401280888,
      "loss": 1.0086,
      "step": 790
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0002549207233405752,
      "loss": 1.042,
      "step": 800
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.00025,
      "loss": 1.0053,
      "step": 810
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.00024606332980703556,
      "loss": 0.9431,
      "step": 820
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00024114397920948656,
      "loss": 0.9556,
      "step": 830
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.0002362280599110336,
      "loss": 0.9853,
      "step": 840
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.00023131747660339394,
      "loss": 0.9624,
      "step": 850
    },
    {
      "epoch": 2.15,
      "learning_rate": 0.00022641413191083445,
      "loss": 1.0041,
      "step": 860
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00022151992565299305,
      "loss": 1.0164,
      "step": 870
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.0002166367541087862,
      "loss": 0.9754,
      "step": 880
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.0002117665092816885,
      "loss": 1.026,
      "step": 890
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.00020691107816666853,
      "loss": 0.9588,
      "step": 900
    },
    {
      "epoch": 2.28,
      "learning_rate": 0.00020207234201906547,
      "loss": 0.9782,
      "step": 910
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.00019821463357016516,
      "loss": 0.976,
      "step": 920
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0001934106680160237,
      "loss": 0.9655,
      "step": 930
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.00018862862821480023,
      "loss": 0.9635,
      "step": 940
    },
    {
      "epoch": 2.38,
      "learning_rate": 0.0001838703669860889,
      "loss": 0.9856,
      "step": 950
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00017913772793638516,
      "loss": 0.9806,
      "step": 960
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.00017443254474477327,
      "loss": 0.9534,
      "step": 970
    },
    {
      "epoch": 2.45,
      "learning_rate": 0.0001697566404524606,
      "loss": 0.9758,
      "step": 980
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.00016511182675643272,
      "loss": 0.9761,
      "step": 990
    },
    {
      "epoch": 2.5,
      "learning_rate": 0.00016049990330750509,
      "loss": 1.0078,
      "step": 1000
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.00015592265701304116,
      "loss": 0.9554,
      "step": 1010
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.0001522870197705943,
      "loss": 0.995,
      "step": 1020
    },
    {
      "epoch": 2.58,
      "learning_rate": 0.00014777665203485283,
      "loss": 0.961,
      "step": 1030
    },
    {
      "epoch": 2.6,
      "learning_rate": 0.0001433058911258991,
      "loss": 0.9663,
      "step": 1040
    },
    {
      "epoch": 2.63,
      "learning_rate": 0.00013887646925713116,
      "loss": 0.9904,
      "step": 1050
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00013449010262497774,
      "loss": 0.9935,
      "step": 1060
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00013014849074395197,
      "loss": 0.9857,
      "step": 1070
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0001258533157881674,
      "loss": 0.9693,
      "step": 1080
    },
    {
      "epoch": 2.73,
      "learning_rate": 0.00012160624193957337,
      "loss": 0.9986,
      "step": 1090
    },
    {
      "epoch": 2.75,
      "learning_rate": 0.00011740891474316156,
      "loss": 0.9771,
      "step": 1100
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.00011326296046939332,
      "loss": 0.9624,
      "step": 1110
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.00010916998548409449,
      "loss": 1.0205,
      "step": 1120
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.00010593481697266582,
      "loss": 0.9676,
      "step": 1130
    },
    {
      "epoch": 2.85,
      "learning_rate": 0.00010194118683375503,
      "loss": 1.0014,
      "step": 1140
    },
    {
      "epoch": 2.88,
      "learning_rate": 9.800492264777183e-05,
      "loss": 0.9611,
      "step": 1150
    },
    {
      "epoch": 2.9,
      "learning_rate": 9.412754953531663e-05,
      "loss": 0.985,
      "step": 1160
    },
    {
      "epoch": 2.93,
      "learning_rate": 9.031056979941806e-05,
      "loss": 0.9592,
      "step": 1170
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.65554623434604e-05,
      "loss": 0.9723,
      "step": 1180
    },
    {
      "epoch": 2.98,
      "learning_rate": 8.286368209817644e-05,
      "loss": 0.9885,
      "step": 1190
    },
    {
      "epoch": 3.0,
      "learning_rate": 7.923665945792944e-05,
      "loss": 0.9853,
      "step": 1200
    },
    {
      "epoch": 3.03,
      "learning_rate": 7.567579972650115e-05,
      "loss": 0.9338,
      "step": 1210
    },
    {
      "epoch": 3.05,
      "learning_rate": 7.218248257260127e-05,
      "loss": 0.915,
      "step": 1220
    },
    {
      "epoch": 3.08,
      "learning_rate": 6.94373699494272e-05,
      "loss": 0.9401,
      "step": 1230
    },
    {
      "epoch": 3.1,
      "learning_rate": 6.60690223317171e-05,
      "loss": 0.8936,
      "step": 1240
    },
    {
      "epoch": 3.13,
      "learning_rate": 6.277193947414675e-05,
      "loss": 0.9237,
      "step": 1250
    },
    {
      "epoch": 3.15,
      "learning_rate": 5.954739884406821e-05,
      "loss": 0.9665,
      "step": 1260
    },
    {
      "epoch": 3.18,
      "learning_rate": 5.639664980207024e-05,
      "loss": 0.9007,
      "step": 1270
    },
    {
      "epoch": 3.2,
      "learning_rate": 5.332091311790888e-05,
      "loss": 0.9468,
      "step": 1280
    },
    {
      "epoch": 3.23,
      "learning_rate": 5.03213804975155e-05,
      "loss": 0.9215,
      "step": 1290
    },
    {
      "epoch": 3.25,
      "learning_rate": 4.739921412126591e-05,
      "loss": 0.8978,
      "step": 1300
    },
    {
      "epoch": 3.28,
      "learning_rate": 4.455554619368874e-05,
      "loss": 0.9268,
      "step": 1310
    },
    {
      "epoch": 3.3,
      "learning_rate": 4.179147850478876e-05,
      "loss": 0.9546,
      "step": 1320
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.9638257249896944e-05,
      "loss": 0.957,
      "step": 1330
    },
    {
      "epoch": 3.35,
      "learning_rate": 3.7020147790418265e-05,
      "loss": 0.9297,
      "step": 1340
    },
    {
      "epoch": 3.38,
      "learning_rate": 3.448455818852267e-05,
      "loss": 0.9153,
      "step": 1350
    },
    {
      "epoch": 3.4,
      "learning_rate": 3.203247086809041e-05,
      "loss": 0.9311,
      "step": 1360
    },
    {
      "epoch": 3.43,
      "learning_rate": 2.9664835899723936e-05,
      "loss": 0.9273,
      "step": 1370
    },
    {
      "epoch": 3.45,
      "learning_rate": 2.7382570632638855e-05,
      "loss": 0.9725,
      "step": 1380
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.5186559339234085e-05,
      "loss": 0.9288,
      "step": 1390
    },
    {
      "epoch": 3.5,
      "learning_rate": 2.3077652872476624e-05,
      "loss": 0.9152,
      "step": 1400
    },
    {
      "epoch": 3.53,
      "learning_rate": 2.1056668336235624e-05,
      "loss": 0.915,
      "step": 1410
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.912438876869238e-05,
      "loss": 0.9102,
      "step": 1420
    },
    {
      "epoch": 3.58,
      "learning_rate": 1.764293696003358e-05,
      "loss": 0.8859,
      "step": 1430
    },
    {
      "epoch": 3.6,
      "learning_rate": 1.587218970073634e-05,
      "loss": 0.9584,
      "step": 1440
    },
    {
      "epoch": 3.63,
      "learning_rate": 1.4192156156195151e-05,
      "loss": 0.9442,
      "step": 1450
    },
    {
      "epoch": 3.65,
      "learning_rate": 1.2603487261826724e-05,
      "loss": 0.9237,
      "step": 1460
    },
    {
      "epoch": 3.68,
      "learning_rate": 1.1106798553464804e-05,
      "loss": 0.9215,
      "step": 1470
    },
    {
      "epoch": 3.7,
      "learning_rate": 9.702669928868674e-06,
      "loss": 0.9742,
      "step": 1480
    },
    {
      "epoch": 3.73,
      "learning_rate": 8.391645423039356e-06,
      "loss": 0.9328,
      "step": 1490
    },
    {
      "epoch": 3.75,
      "learning_rate": 7.1742329974313905e-06,
      "loss": 0.959,
      "step": 1500
    },
    {
      "epoch": 3.78,
      "learning_rate": 6.050904343141095e-06,
      "loss": 0.9513,
      "step": 1510
    },
    {
      "epoch": 3.8,
      "learning_rate": 5.022094698148072e-06,
      "loss": 0.9556,
      "step": 1520
    },
    {
      "epoch": 3.83,
      "learning_rate": 4.267369773460511e-06,
      "loss": 0.9389,
      "step": 1530
    },
    {
      "epoch": 3.85,
      "learning_rate": 3.4096741493194194e-06,
      "loss": 0.9352,
      "step": 1540
    },
    {
      "epoch": 3.88,
      "learning_rate": 2.647520889206889e-06,
      "loss": 0.9231,
      "step": 1550
    },
    {
      "epoch": 3.9,
      "learning_rate": 1.9812052923159906e-06,
      "loss": 0.8914,
      "step": 1560
    },
    {
      "epoch": 3.93,
      "learning_rate": 1.410985525170827e-06,
      "loss": 0.9654,
      "step": 1570
    },
    {
      "epoch": 3.95,
      "learning_rate": 9.370825215991308e-07,
      "loss": 0.932,
      "step": 1580
    },
    {
      "epoch": 3.98,
      "learning_rate": 5.596798971305439e-07,
      "loss": 0.9324,
      "step": 1590
    },
    {
      "epoch": 3.99,
      "step": 1596,
      "total_flos": 6.727661159278707e+18,
      "train_loss": 1.0181221991852112,
      "train_runtime": 7513.941,
      "train_samples_per_second": 27.23,
      "train_steps_per_second": 0.212
    }
  ],
  "max_steps": 1596,
  "num_train_epochs": 4,
  "total_flos": 6.727661159278707e+18,
  "trial_name": null,
  "trial_params": null
}