{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.92,
  "global_step": 620,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "learning_rate": 0.0004996791267927632,
      "loss": 1.0343,
      "step": 10
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0004987173308479738,
      "loss": 0.9957,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0004971170810820279,
      "loss": 0.9843,
      "step": 30
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0004948824853131237,
      "loss": 0.9802,
      "step": 40
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.000492019279716551,
      "loss": 0.9644,
      "step": 50
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0004885348141000122,
      "loss": 0.9649,
      "step": 60
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00048443803303677016,
      "loss": 0.9325,
      "step": 70
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00047973945290505766,
      "loss": 0.9155,
      "step": 80
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00047445113489268543,
      "loss": 0.9204,
      "step": 90
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00046858665403614556,
      "loss": 0.9111,
      "step": 100
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00046216106437368775,
      "loss": 0.9169,
      "step": 110
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0004551908603018191,
      "loss": 0.9059,
      "step": 120
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.0004476939342344246,
      "loss": 0.8727,
      "step": 130
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.0004396895306731977,
      "loss": 0.839,
      "step": 140
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00043119819680728,
      "loss": 0.8502,
      "step": 150
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.0004222417297689217,
      "loss": 0.8507,
      "step": 160
    },
    {
      "epoch": 2.72,
      "learning_rate": 0.00041284312068055564,
      "loss": 0.8369,
      "step": 170
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00040302649563691575,
      "loss": 0.8427,
      "step": 180
    },
    {
      "epoch": 3.04,
      "learning_rate": 0.00039281705377369806,
      "loss": 0.8262,
      "step": 190
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.0003822410025817406,
      "loss": 0.7666,
      "step": 200
    },
    {
      "epoch": 3.36,
      "learning_rate": 0.00037132549063277035,
      "loss": 0.7582,
      "step": 210
    },
    {
      "epoch": 3.52,
      "learning_rate": 0.0003600985378894086,
      "loss": 0.7601,
      "step": 220
    },
    {
      "epoch": 3.68,
      "learning_rate": 0.0003497518024650269,
      "loss": 0.7741,
      "step": 230
    },
    {
      "epoch": 3.84,
      "learning_rate": 0.00033801311090533707,
      "loss": 0.7825,
      "step": 240
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.00032604849095224664,
      "loss": 0.77,
      "step": 250
    },
    {
      "epoch": 4.16,
      "learning_rate": 0.0003138886556135773,
      "loss": 0.6963,
      "step": 260
    },
    {
      "epoch": 4.32,
      "learning_rate": 0.0003015648190122457,
      "loss": 0.6924,
      "step": 270
    },
    {
      "epoch": 4.48,
      "learning_rate": 0.0002903592840187234,
      "loss": 0.6993,
      "step": 280
    },
    {
      "epoch": 4.64,
      "learning_rate": 0.00027781128227380593,
      "loss": 0.7066,
      "step": 290
    },
    {
      "epoch": 4.8,
      "learning_rate": 0.000265191889366164,
      "loss": 0.7021,
      "step": 300
    },
    {
      "epoch": 4.96,
      "learning_rate": 0.00025253349909640276,
      "loss": 0.7012,
      "step": 310
    },
    {
      "epoch": 5.12,
      "learning_rate": 0.00023986860537079675,
      "loss": 0.6495,
      "step": 320
    },
    {
      "epoch": 5.28,
      "learning_rate": 0.00022722971878989839,
      "loss": 0.628,
      "step": 330
    },
    {
      "epoch": 5.44,
      "learning_rate": 0.00021464928319429267,
      "loss": 0.6352,
      "step": 340
    },
    {
      "epoch": 5.6,
      "learning_rate": 0.00020215959238172344,
      "loss": 0.6393,
      "step": 350
    },
    {
      "epoch": 5.76,
      "learning_rate": 0.00018979270720937812,
      "loss": 0.6379,
      "step": 360
    },
    {
      "epoch": 5.92,
      "learning_rate": 0.000177580373294127,
      "loss": 0.6438,
      "step": 370
    },
    {
      "epoch": 6.08,
      "learning_rate": 0.0001655539395219799,
      "loss": 0.6145,
      "step": 380
    },
    {
      "epoch": 6.24,
      "learning_rate": 0.0001549146203368641,
      "loss": 0.5775,
      "step": 390
    },
    {
      "epoch": 6.4,
      "learning_rate": 0.00014332599003225637,
      "loss": 0.5826,
      "step": 400
    },
    {
      "epoch": 6.56,
      "learning_rate": 0.00013312951433543224,
      "loss": 0.5835,
      "step": 410
    },
    {
      "epoch": 6.72,
      "learning_rate": 0.00012208601726910445,
      "loss": 0.5882,
      "step": 420
    },
    {
      "epoch": 6.88,
      "learning_rate": 0.00011137087356189105,
      "loss": 0.5918,
      "step": 430
    },
    {
      "epoch": 7.04,
      "learning_rate": 0.00010101158883401077,
      "loss": 0.5791,
      "step": 440
    },
    {
      "epoch": 7.2,
      "learning_rate": 9.103475522078605e-05,
      "loss": 0.5431,
      "step": 450
    },
    {
      "epoch": 7.36,
      "learning_rate": 8.146598311101316e-05,
      "loss": 0.5388,
      "step": 460
    },
    {
      "epoch": 7.52,
      "learning_rate": 7.232983540546173e-05,
      "loss": 0.5419,
      "step": 470
    },
    {
      "epoch": 7.68,
      "learning_rate": 6.36497644642601e-05,
      "loss": 0.5547,
      "step": 480
    },
    {
      "epoch": 7.84,
      "learning_rate": 5.5448051905024056e-05,
      "loss": 0.5502,
      "step": 490
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.7745751406263163e-05,
      "loss": 0.5561,
      "step": 500
    },
    {
      "epoch": 8.16,
      "learning_rate": 4.125704689189819e-05,
      "loss": 0.5196,
      "step": 510
    },
    {
      "epoch": 8.32,
      "learning_rate": 3.455699981411259e-05,
      "loss": 0.5271,
      "step": 520
    },
    {
      "epoch": 8.48,
      "learning_rate": 2.8409991827897968e-05,
      "loss": 0.5235,
      "step": 530
    },
    {
      "epoch": 8.64,
      "learning_rate": 2.283180221459377e-05,
      "loss": 0.5244,
      "step": 540
    },
    {
      "epoch": 8.8,
      "learning_rate": 1.830963680866285e-05,
      "loss": 0.5254,
      "step": 550
    },
    {
      "epoch": 8.96,
      "learning_rate": 1.3850414819903234e-05,
      "loss": 0.522,
      "step": 560
    },
    {
      "epoch": 9.12,
      "learning_rate": 9.997385429418554e-06,
      "loss": 0.5163,
      "step": 570
    },
    {
      "epoch": 9.28,
      "learning_rate": 6.760439308393762e-06,
      "loss": 0.5155,
      "step": 580
    },
    {
      "epoch": 9.44,
      "learning_rate": 4.147885651096861e-06,
      "loss": 0.5095,
      "step": 590
    },
    {
      "epoch": 9.6,
      "learning_rate": 2.16643084529658e-06,
      "loss": 0.5181,
      "step": 600
    },
    {
      "epoch": 9.76,
      "learning_rate": 8.211612570611926e-07,
      "loss": 0.5158,
      "step": 610
    },
    {
      "epoch": 9.92,
      "learning_rate": 1.1553017412971323e-07,
      "loss": 0.5156,
      "step": 620
    },
    {
      "epoch": 9.92,
      "step": 620,
      "total_flos": 7.870629500231352e+17,
      "train_loss": 0.703584014215777,
      "train_runtime": 1791.5361,
      "train_samples_per_second": 44.654,
      "train_steps_per_second": 0.346
    }
  ],
  "max_steps": 620,
  "num_train_epochs": 10,
  "total_flos": 7.870629500231352e+17,
  "trial_name": null,
  "trial_params": null
}