finetuned_models/my_internlm_model/trainer_state.json

{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.99812382739212,
  "global_step": 399,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 4.992254676005419e-05,
      "loss": 2.3336,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.969066696056699e-05,
      "loss": 2.2307,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.9305797388878264e-05,
      "loss": 2.1497,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.877032279661073e-05,
      "loss": 2.0895,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.8087561123130764e-05,
      "loss": 2.1052,
      "step": 50
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.726174293673612e-05,
      "loss": 2.0591,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.629798522095818e-05,
      "loss": 2.0869,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.520225966840574e-05,
      "loss": 2.065,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.398135567860972e-05,
      "loss": 2.078,
      "step": 90
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.264283828914392e-05,
      "loss": 2.0751,
      "step": 100
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.119500130069138e-05,
      "loss": 2.0628,
      "step": 110
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.964681588650562e-05,
      "loss": 2.0149,
      "step": 120
    },
    {
      "epoch": 0.33,
      "learning_rate": 3.800787500469589e-05,
      "loss": 2.0432,
      "step": 130
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.628833395777224e-05,
      "loss": 2.031,
      "step": 140
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.4498847467759e-05,
      "loss": 2.0232,
      "step": 150
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.2650503656775446e-05,
      "loss": 1.9852,
      "step": 160
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.075475534215712e-05,
      "loss": 2.0334,
      "step": 170
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.882334907183115e-05,
      "loss": 2.0359,
      "step": 180
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.686825233966061e-05,
      "loss": 1.987,
      "step": 190
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.4901579431750625e-05,
      "loss": 2.0376,
      "step": 200
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.2935516363191693e-05,
      "loss": 2.0186,
      "step": 210
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.0982245370351645e-05,
      "loss": 2.0056,
      "step": 220
    },
    {
      "epoch": 0.58,
      "learning_rate": 1.9053869426581475e-05,
      "loss": 1.9924,
      "step": 230
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.7162337249055477e-05,
      "loss": 2.0295,
      "step": 240
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.531936926142254e-05,
      "loss": 2.0122,
      "step": 250
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.3536384971023359e-05,
      "loss": 2.049,
      "step": 260
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.182443221066303e-05,
      "loss": 2.0183,
      "step": 270
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.0194118683375503e-05,
      "loss": 2.0236,
      "step": 280
    },
    {
      "epoch": 0.73,
      "learning_rate": 8.65554623434604e-06,
      "loss": 2.0514,
      "step": 290
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.218248257260127e-06,
      "loss": 2.0722,
      "step": 300
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.891130622925209e-06,
      "loss": 2.0315,
      "step": 310
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.68241649618675e-06,
      "loss": 2.0229,
      "step": 320
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.8057545285486053e-06,
      "loss": 2.0299,
      "step": 330
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.8285175036914107e-06,
      "loss": 2.0282,
      "step": 340
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.988660731133499e-06,
      "loss": 2.0454,
      "step": 350
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.2913881811248696e-06,
      "loss": 1.9951,
      "step": 360
    },
    {
      "epoch": 0.93,
      "learning_rate": 7.410203351154083e-07,
      "loss": 2.047,
      "step": 370
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.4096741493194197e-07,
      "loss": 1.9984,
      "step": 380
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.370825215991308e-08,
      "loss": 2.0279,
      "step": 390
    },
    {
      "epoch": 1.0,
      "step": 399,
      "total_flos": 3.1993772256945766e+17,
      "train_loss": 2.0517517462708894,
      "train_runtime": 920.588,
      "train_samples_per_second": 55.563,
      "train_steps_per_second": 0.433
    }
  ],
  "max_steps": 399,
  "num_train_epochs": 1,
  "total_flos": 3.1993772256945766e+17,
  "trial_name": null,
  "trial_params": null
}
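
The "log_history" list above holds one entry per logging step (loss and learning rate every 10 steps), plus a final summary entry at step 399 that has no per-step "loss". A minimal sketch for turning this into a loss curve is shown below; it assumes the file path from above and that matplotlib is installed in your environment, so adjust both as needed.

# Minimal sketch: plot the training loss from trainer_state.json.
# Assumptions: the path below matches the file shown above, and
# matplotlib is available; adjust both to your setup.
import json

import matplotlib.pyplot as plt

with open("finetuned_models/my_internlm_model/trainer_state.json") as f:
    state = json.load(f)

# Keep only entries that carry a per-step loss; the final summary
# entry (train_loss, train_runtime, ...) is skipped by this filter.
logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title(f"epoch {state['epoch']:.2f}, {state['global_step']} steps")
plt.savefig("loss_curve.png", dpi=150)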