{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 960,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 3.4482758620689657e-05,
      "loss": 1.9291,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 6.896551724137931e-05,
      "loss": 1.8312,
      "step": 20
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.99997153311283e-05,
      "loss": 1.7861,
      "step": 30
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.996555898851128e-05,
      "loss": 1.8195,
      "step": 40
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.987451343321279e-05,
      "loss": 1.7855,
      "step": 50
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.972668232683624e-05,
      "loss": 1.7271,
      "step": 60
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.952223398522602e-05,
      "loss": 1.7924,
      "step": 70
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.926140118682841e-05,
      "loss": 1.7747,
      "step": 80
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.894448090765732e-05,
      "loss": 1.7476,
      "step": 90
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.85718339831668e-05,
      "loss": 1.7275,
      "step": 100
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.814388469741518e-05,
      "loss": 1.7466,
      "step": 110
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.766112029998846e-05,
      "loss": 1.7395,
      "step": 120
    },
    {
      "epoch": 0.41,
      "learning_rate": 9.712409045123347e-05,
      "loss": 1.7458,
      "step": 130
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.653340659643169e-05,
      "loss": 1.7458,
      "step": 140
    },
    {
      "epoch": 0.47,
      "learning_rate": 9.588974126962705e-05,
      "loss": 1.7236,
      "step": 150
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.519382732789979e-05,
      "loss": 1.6927,
      "step": 160
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.444645711695842e-05,
      "loss": 1.7096,
      "step": 170
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.36484815689999e-05,
      "loss": 1.7183,
      "step": 180
    },
    {
      "epoch": 0.59,
      "learning_rate": 9.280080923386501e-05,
      "loss": 1.6232,
      "step": 190
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.190440524459203e-05,
      "loss": 1.6853,
      "step": 200
    },
    {
      "epoch": 0.66,
      "learning_rate": 9.096029021854677e-05,
      "loss": 1.6604,
      "step": 210
    },
    {
      "epoch": 0.69,
      "learning_rate": 8.996953909537962e-05,
      "loss": 1.5873,
      "step": 220
    },
    {
      "epoch": 0.72,
      "learning_rate": 8.893327991313322e-05,
      "loss": 1.6196,
      "step": 230
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.796271135721944e-05,
      "loss": 1.6748,
      "step": 240
    },
    {
      "epoch": 0.78,
      "learning_rate": 8.684327909794213e-05,
      "loss": 1.6586,
      "step": 250
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.568189824081232e-05,
      "loss": 1.6048,
      "step": 260
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.447989109751673e-05,
      "loss": 1.6356,
      "step": 270
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.323862623555276e-05,
      "loss": 1.5976,
      "step": 280
    },
    {
      "epoch": 0.91,
      "learning_rate": 8.195951692002052e-05,
      "loss": 1.606,
      "step": 290
    },
    {
      "epoch": 0.94,
      "learning_rate": 8.077716434076092e-05,
      "loss": 1.6089,
      "step": 300
    },
    {
      "epoch": 0.97,
      "learning_rate": 7.943019709483513e-05,
      "loss": 1.6383,
      "step": 310
    },
    {
      "epoch": 1.0,
      "learning_rate": 7.804972155288145e-05,
      "loss": 1.5861,
      "step": 320
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.596409559249878,
      "eval_runtime": 38.3965,
      "eval_samples_per_second": 266.587,
      "eval_steps_per_second": 8.334,
      "step": 320
    },
    {
      "epoch": 1.03,
      "learning_rate": 7.663730948089827e-05,
      "loss": 1.5533,
      "step": 330
    },
    {
      "epoch": 1.06,
      "learning_rate": 7.519456900681179e-05,
      "loss": 1.561,
      "step": 340
    },
    {
      "epoch": 1.09,
      "learning_rate": 7.372314278951213e-05,
      "loss": 1.532,
      "step": 350
    },
    {
      "epoch": 1.12,
      "learning_rate": 7.22247061485738e-05,
      "loss": 1.5622,
      "step": 360
    },
    {
      "epoch": 1.16,
      "learning_rate": 7.070096515678927e-05,
      "loss": 1.5546,
      "step": 370
    },
    {
      "epoch": 1.19,
      "learning_rate": 6.915365469768857e-05,
      "loss": 1.5469,
      "step": 380
    },
    {
      "epoch": 1.22,
      "learning_rate": 6.758453649025516e-05,
      "loss": 1.5391,
      "step": 390
    },
    {
      "epoch": 1.25,
      "learning_rate": 6.599539708308837e-05,
      "loss": 1.5008,
      "step": 400
    },
    {
      "epoch": 1.28,
      "learning_rate": 6.438804582029532e-05,
      "loss": 1.538,
      "step": 410
    },
    {
      "epoch": 1.31,
      "learning_rate": 6.27643127814287e-05,
      "loss": 1.4894,
      "step": 420
    },
    {
      "epoch": 1.34,
      "learning_rate": 6.112604669781572e-05,
      "loss": 1.5066,
      "step": 430
    },
    {
      "epoch": 1.38,
      "learning_rate": 5.9475112847650985e-05,
      "loss": 1.4565,
      "step": 440
    },
    {
      "epoch": 1.41,
      "learning_rate": 5.7979994751668964e-05,
      "loss": 1.4577,
      "step": 450
    },
    {
      "epoch": 1.44,
      "learning_rate": 5.6310180918491364e-05,
      "loss": 1.4836,
      "step": 460
    },
    {
      "epoch": 1.47,
      "learning_rate": 5.463318251194768e-05,
      "loss": 1.4855,
      "step": 470
    },
    {
      "epoch": 1.5,
      "learning_rate": 5.2950908909636146e-05,
      "loss": 1.498,
      "step": 480
    },
    {
      "epoch": 1.53,
      "learning_rate": 5.143393535088998e-05,
      "loss": 1.5224,
      "step": 490
    },
    {
      "epoch": 1.56,
      "learning_rate": 4.974691896571781e-05,
      "loss": 1.4979,
      "step": 500
    },
    {
      "epoch": 1.59,
      "learning_rate": 4.806019073064493e-05,
      "loss": 1.4818,
      "step": 510
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.6375671101346135e-05,
      "loss": 1.4798,
      "step": 520
    },
    {
      "epoch": 1.66,
      "learning_rate": 4.4695278018847105e-05,
      "loss": 1.4484,
      "step": 530
    },
    {
      "epoch": 1.69,
      "learning_rate": 4.302092472581729e-05,
      "loss": 1.4578,
      "step": 540
    },
    {
      "epoch": 1.72,
      "learning_rate": 4.135451758821191e-05,
      "loss": 1.4367,
      "step": 550
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.969795392474383e-05,
      "loss": 1.4777,
      "step": 560
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.8053119846656026e-05,
      "loss": 1.4458,
      "step": 570
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.642188811025481e-05,
      "loss": 1.4411,
      "step": 580
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.4966944936695965e-05,
      "loss": 1.4635,
      "step": 590
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.336665992136068e-05,
      "loss": 1.4476,
      "step": 600
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.194254407457124e-05,
      "loss": 1.4252,
      "step": 610
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.037978169468993e-05,
      "loss": 1.4093,
      "step": 620
    },
    {
      "epoch": 1.97,
      "learning_rate": 2.8839358277778755e-05,
      "loss": 1.4478,
      "step": 630
    },
    {
      "epoch": 2.0,
      "learning_rate": 2.732302770145182e-05,
      "loss": 1.4135,
      "step": 640
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.4583779573440552,
      "eval_runtime": 38.3468,
      "eval_samples_per_second": 266.933,
      "eval_steps_per_second": 8.345,
      "step": 640
    },
    {
      "epoch": 2.03,
      "learning_rate": 2.5980357067871908e-05,
      "loss": 1.3666,
      "step": 650
    },
    {
      "epoch": 2.06,
      "learning_rate": 2.451453499155258e-05,
      "loss": 1.404,
      "step": 660
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.307772986347724e-05,
      "loss": 1.4156,
      "step": 670
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.16715775847379e-05,
      "loss": 1.3782,
      "step": 680
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.029767915605719e-05,
      "loss": 1.4042,
      "step": 690
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.9090041275027743e-05,
      "loss": 1.4143,
      "step": 700
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.7781702958315426e-05,
      "loss": 1.3693,
      "step": 710
    },
    {
      "epoch": 2.25,
      "learning_rate": 1.6510047380468512e-05,
      "loss": 1.3926,
      "step": 720
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.5276522408499567e-05,
      "loss": 1.4257,
      "step": 730
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.4082532495113626e-05,
      "loss": 1.3812,
      "step": 740
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.2929437079642415e-05,
      "loss": 1.3969,
      "step": 750
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.1818549040229643e-05,
      "loss": 1.4128,
      "step": 760
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.0751133199029163e-05,
      "loss": 1.37,
      "step": 770
    },
    {
      "epoch": 2.44,
      "learning_rate": 9.728404882118352e-06,
      "loss": 1.3702,
      "step": 780
    },
    {
      "epoch": 2.47,
      "learning_rate": 8.751528535766102e-06,
      "loss": 1.3934,
      "step": 790
    },
    {
      "epoch": 2.5,
      "learning_rate": 7.821616400630865e-06,
      "loss": 1.3615,
      "step": 800
    },
    {
      "epoch": 2.53,
      "learning_rate": 6.939727245398586e-06,
      "loss": 1.3578,
      "step": 810
    },
    {
      "epoch": 2.56,
      "learning_rate": 6.1068651613018614e-06,
      "loss": 1.3887,
      "step": 820
    },
    {
      "epoch": 2.59,
      "learning_rate": 5.323978418893366e-06,
      "loss": 1.3743,
      "step": 830
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.591958388374829e-06,
      "loss": 1.368,
      "step": 840
    },
    {
      "epoch": 2.66,
      "learning_rate": 3.911638524710942e-06,
      "loss": 1.3977,
      "step": 850
    },
    {
      "epoch": 2.69,
      "learning_rate": 3.2837934186838117e-06,
      "loss": 1.3762,
      "step": 860
    },
    {
      "epoch": 2.72,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 1.3469,
      "step": 870
    },
    {
      "epoch": 2.75,
      "learning_rate": 2.1883262982322185e-06,
      "loss": 1.4046,
      "step": 880
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.7219515481887217e-06,
      "loss": 1.3557,
      "step": 890
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.3105446644479214e-06,
      "loss": 1.3614,
      "step": 900
    },
    {
      "epoch": 2.84,
      "learning_rate": 9.545740619376741e-07,
      "loss": 1.357,
      "step": 910
    },
    {
      "epoch": 2.88,
      "learning_rate": 6.544450375809697e-07,
      "loss": 1.3591,
      "step": 920
    },
    {
      "epoch": 2.91,
      "learning_rate": 4.104993088376974e-07,
      "loss": 1.3916,
      "step": 930
    },
    {
      "epoch": 2.94,
      "learning_rate": 2.2301462463582557e-07,
      "loss": 1.3442,
      "step": 940
    },
    {
      "epoch": 2.97,
      "learning_rate": 9.220444913526916e-08,
      "loss": 1.3638,
      "step": 950
    },
    {
      "epoch": 3.0,
      "learning_rate": 1.8217718684304664e-08,
      "loss": 1.3797,
      "step": 960
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.4223334789276123,
      "eval_runtime": 80.5484,
      "eval_samples_per_second": 127.079,
      "eval_steps_per_second": 3.973,
      "step": 960
    },
    {
      "epoch": 3.0,
      "step": 960,
      "total_flos": 8.229739999768084e+17,
      "train_loss": 1.5236846268177033,
      "train_runtime": 2025.1348,
      "train_samples_per_second": 60.652,
      "train_steps_per_second": 0.474
    }
  ],
  "max_steps": 960,
  "num_train_epochs": 3,
  "total_flos": 8.229739999768084e+17,
  "trial_name": null,
  "trial_params": null
}