{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.79381443298969,
  "global_step": 480,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 1.6759,
      "step": 10
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.999486510586282e-05,
      "loss": 1.6266,
      "step": 20
    },
    {
      "epoch": 0.99,
      "eval_loss": 1.557953119277954,
      "eval_runtime": 4.2802,
      "eval_samples_per_second": 179.9,
      "eval_steps_per_second": 5.841,
      "step": 24
    },
    {
      "epoch": 1.24,
      "learning_rate": 4.9903636767551285e-05,
      "loss": 1.5465,
      "step": 30
    },
    {
      "epoch": 1.65,
      "learning_rate": 4.969877884657107e-05,
      "loss": 1.4942,
      "step": 40
    },
    {
      "epoch": 1.98,
      "eval_loss": 1.4629647731781006,
      "eval_runtime": 4.2749,
      "eval_samples_per_second": 180.121,
      "eval_steps_per_second": 5.848,
      "step": 48
    },
    {
      "epoch": 2.06,
      "learning_rate": 4.9418015319328204e-05,
      "loss": 1.465,
      "step": 50
    },
    {
      "epoch": 2.47,
      "learning_rate": 4.900026145705815e-05,
      "loss": 1.4334,
      "step": 60
    },
    {
      "epoch": 2.89,
      "learning_rate": 4.847299990646289e-05,
      "loss": 1.427,
      "step": 70
    },
    {
      "epoch": 2.97,
      "eval_loss": 1.4183939695358276,
      "eval_runtime": 4.2839,
      "eval_samples_per_second": 179.743,
      "eval_steps_per_second": 5.836,
      "step": 72
    },
    {
      "epoch": 3.3,
      "learning_rate": 4.783863644106502e-05,
      "loss": 1.4147,
      "step": 80
    },
    {
      "epoch": 3.71,
      "learning_rate": 4.710006551585946e-05,
      "loss": 1.4066,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.3874717950820923,
      "eval_runtime": 4.283,
      "eval_samples_per_second": 179.779,
      "eval_steps_per_second": 5.837,
      "step": 97
    },
    {
      "epoch": 4.12,
      "learning_rate": 4.6260657060579185e-05,
      "loss": 1.3815,
      "step": 100
    },
    {
      "epoch": 4.54,
      "learning_rate": 4.5324241103478804e-05,
      "loss": 1.3686,
      "step": 110
    },
    {
      "epoch": 4.95,
      "learning_rate": 4.429509029579405e-05,
      "loss": 1.3622,
      "step": 120
    },
    {
      "epoch": 4.99,
      "eval_loss": 1.3649791479110718,
      "eval_runtime": 4.2778,
      "eval_samples_per_second": 179.998,
      "eval_steps_per_second": 5.844,
      "step": 121
    },
    {
      "epoch": 5.36,
      "learning_rate": 4.317790041661424e-05,
      "loss": 1.3596,
      "step": 130
    },
    {
      "epoch": 5.77,
      "learning_rate": 4.197776894711922e-05,
      "loss": 1.3415,
      "step": 140
    },
    {
      "epoch": 5.98,
      "eval_loss": 1.345449447631836,
      "eval_runtime": 4.2805,
      "eval_samples_per_second": 179.886,
      "eval_steps_per_second": 5.84,
      "step": 145
    },
    {
      "epoch": 6.19,
      "learning_rate": 4.070017181194199e-05,
      "loss": 1.3462,
      "step": 150
    },
    {
      "epoch": 6.6,
      "learning_rate": 3.935093839378049e-05,
      "loss": 1.3366,
      "step": 160
    },
    {
      "epoch": 6.97,
      "eval_loss": 1.3287326097488403,
      "eval_runtime": 4.2799,
      "eval_samples_per_second": 179.912,
      "eval_steps_per_second": 5.841,
      "step": 169
    },
    {
      "epoch": 7.01,
      "learning_rate": 3.7936224935261434e-05,
      "loss": 1.3361,
      "step": 170
    },
    {
      "epoch": 7.42,
      "learning_rate": 3.646248644941716e-05,
      "loss": 1.3128,
      "step": 180
    },
    {
      "epoch": 7.84,
      "learning_rate": 3.493644726694171e-05,
      "loss": 1.3099,
      "step": 190
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.315126895904541,
      "eval_runtime": 4.2794,
      "eval_samples_per_second": 179.93,
      "eval_steps_per_second": 5.842,
      "step": 194
    },
    {
      "epoch": 8.25,
      "learning_rate": 3.336507035461215e-05,
      "loss": 1.3131,
      "step": 200
    },
    {
      "epoch": 8.66,
      "learning_rate": 3.175552554486822e-05,
      "loss": 1.2987,
      "step": 210
    },
    {
      "epoch": 8.99,
      "eval_loss": 1.302283525466919,
      "eval_runtime": 4.2831,
      "eval_samples_per_second": 179.775,
      "eval_steps_per_second": 5.837,
      "step": 218
    },
    {
      "epoch": 9.07,
      "learning_rate": 3.011515682151135e-05,
      "loss": 1.2823,
      "step": 220
    },
    {
      "epoch": 9.48,
      "learning_rate": 2.8451448810791014e-05,
      "loss": 1.2985,
      "step": 230
    },
    {
      "epoch": 9.9,
      "learning_rate": 2.6771992630771824e-05,
      "loss": 1.2839,
      "step": 240
    },
    {
      "epoch": 9.98,
      "eval_loss": 1.2930171489715576,
      "eval_runtime": 4.2849,
      "eval_samples_per_second": 179.701,
      "eval_steps_per_second": 5.834,
      "step": 242
    },
    {
      "epoch": 10.31,
      "learning_rate": 2.508445125480291e-05,
      "loss": 1.2807,
      "step": 250
    },
    {
      "epoch": 10.72,
      "learning_rate": 2.3396524547128072e-05,
      "loss": 1.2931,
      "step": 260
    },
    {
      "epoch": 10.97,
      "eval_loss": 1.284353494644165,
      "eval_runtime": 4.278,
      "eval_samples_per_second": 179.99,
      "eval_steps_per_second": 5.844,
      "step": 266
    },
    {
      "epoch": 11.13,
      "learning_rate": 2.1715914130171337e-05,
      "loss": 1.2665,
      "step": 270
    },
    {
      "epoch": 11.55,
      "learning_rate": 2.0050288243800233e-05,
      "loss": 1.2748,
      "step": 280
    },
    {
      "epoch": 11.96,
      "learning_rate": 1.840724675690608e-05,
      "loss": 1.2611,
      "step": 290
    },
    {
      "epoch": 12.0,
      "eval_loss": 1.2772635221481323,
      "eval_runtime": 4.2757,
      "eval_samples_per_second": 180.086,
      "eval_steps_per_second": 5.847,
      "step": 291
    },
    {
      "epoch": 12.37,
      "learning_rate": 1.6794286490945342e-05,
      "loss": 1.275,
      "step": 300
    },
    {
      "epoch": 12.78,
      "learning_rate": 1.5218767013662816e-05,
      "loss": 1.2529,
      "step": 310
    },
    {
      "epoch": 12.99,
      "eval_loss": 1.2720634937286377,
      "eval_runtime": 4.2902,
      "eval_samples_per_second": 179.479,
      "eval_steps_per_second": 5.827,
      "step": 315
    },
    {
      "epoch": 13.2,
      "learning_rate": 1.3687877059071966e-05,
      "loss": 1.2542,
      "step": 320
    },
    {
      "epoch": 13.61,
      "learning_rate": 1.2208601726910446e-05,
      "loss": 1.2523,
      "step": 330
    },
    {
      "epoch": 13.98,
      "eval_loss": 1.2669304609298706,
      "eval_runtime": 4.2912,
      "eval_samples_per_second": 179.436,
      "eval_steps_per_second": 5.826,
      "step": 339
    },
    {
      "epoch": 14.02,
      "learning_rate": 1.0787690611232118e-05,
      "loss": 1.2478,
      "step": 340
    },
    {
      "epoch": 14.43,
      "learning_rate": 9.431627003557456e-06,
      "loss": 1.2634,
      "step": 350
    },
    {
      "epoch": 14.85,
      "learning_rate": 8.146598311101317e-06,
      "loss": 1.2506,
      "step": 360
    },
    {
      "epoch": 14.97,
      "eval_loss": 1.2636102437973022,
      "eval_runtime": 4.2826,
      "eval_samples_per_second": 179.797,
      "eval_steps_per_second": 5.838,
      "step": 363
    },
    {
      "epoch": 15.26,
      "learning_rate": 6.9384678250530355e-06,
      "loss": 1.2417,
      "step": 370
    },
    {
      "epoch": 15.67,
      "learning_rate": 5.812747967723459e-06,
      "loss": 1.2326,
      "step": 380
    },
    {
      "epoch": 16.0,
      "eval_loss": 1.2610353231430054,
      "eval_runtime": 4.2776,
      "eval_samples_per_second": 180.009,
      "eval_steps_per_second": 5.844,
      "step": 388
    },
    {
      "epoch": 16.08,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 1.2385,
      "step": 390
    },
    {
      "epoch": 16.49,
      "learning_rate": 3.8286862882561916e-06,
      "loss": 1.2405,
      "step": 400
    },
    {
      "epoch": 16.91,
      "learning_rate": 2.9793972844972512e-06,
      "loss": 1.2299,
      "step": 410
    },
    {
      "epoch": 16.99,
      "eval_loss": 1.2595410346984863,
      "eval_runtime": 4.2812,
      "eval_samples_per_second": 179.856,
      "eval_steps_per_second": 5.839,
      "step": 412
    },
    {
      "epoch": 17.32,
      "learning_rate": 2.23058324028059e-06,
      "loss": 1.235,
      "step": 420
    },
    {
      "epoch": 17.73,
      "learning_rate": 1.5856608223418806e-06,
      "loss": 1.2439,
      "step": 430
    },
    {
      "epoch": 17.98,
      "eval_loss": 1.258914589881897,
      "eval_runtime": 4.2816,
      "eval_samples_per_second": 179.841,
      "eval_steps_per_second": 5.839,
      "step": 436
    },
    {
      "epoch": 18.14,
      "learning_rate": 1.0965000905360345e-06,
      "loss": 1.234,
      "step": 440
    },
    {
      "epoch": 18.56,
      "learning_rate": 6.566760725097543e-07,
      "loss": 1.2462,
      "step": 450
    },
    {
      "epoch": 18.97,
      "learning_rate": 3.279250581763982e-07,
      "loss": 1.2403,
      "step": 460
    },
    {
      "epoch": 18.97,
      "eval_loss": 1.258617639541626,
      "eval_runtime": 4.2813,
      "eval_samples_per_second": 179.851,
      "eval_steps_per_second": 5.839,
      "step": 460
    },
    {
      "epoch": 19.38,
      "learning_rate": 1.117470630119799e-07,
      "loss": 1.2433,
      "step": 470
    },
    {
      "epoch": 19.79,
      "learning_rate": 9.128457625823261e-09,
      "loss": 1.2311,
      "step": 480
    },
    {
      "epoch": 19.79,
      "eval_loss": 1.2585641145706177,
      "eval_runtime": 4.2806,
      "eval_samples_per_second": 179.883,
      "eval_steps_per_second": 5.84,
      "step": 480
    },
    {
      "epoch": 19.79,
      "step": 480,
      "total_flos": 8.185257973744927e+17,
      "train_loss": 1.3218892614046733,
      "train_runtime": 1249.4553,
      "train_samples_per_second": 49.269,
      "train_steps_per_second": 0.384
    }
  ],
  "max_steps": 480,
  "num_train_epochs": 20,
  "total_flos": 8.185257973744927e+17,
  "trial_name": null,
  "trial_params": null
}