{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9943714821763603,
  "global_step": 1197,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0004999139013131293,
      "loss": 1.515,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0004996556645563881,
      "loss": 1.3699,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0004992254676005419,
      "loss": 1.33,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0004986236067607343,
      "loss": 1.2951,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0004978504965923896,
      "loss": 1.3171,
      "step": 50
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0004969066696056699,
      "loss": 1.2916,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0004957927758986887,
      "loss": 1.3112,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0004945095827097302,
      "loss": 1.2965,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0004930579738887826,
      "loss": 1.3097,
      "step": 90
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0004914389492887529,
      "loss": 1.3088,
      "step": 100
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0004896536240767774,
      "loss": 1.2991,
      "step": 110
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00048770322796610734,
      "loss": 1.2616,
      "step": 120
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0004855891043690947,
      "loss": 1.2921,
      "step": 130
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0004833127094718643,
      "loss": 1.2756,
      "step": 140
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0004808756112313076,
      "loss": 1.2803,
      "step": 150
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00047827948829509107,
      "loss": 1.2458,
      "step": 160
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00047552612884542065,
      "loss": 1.2722,
      "step": 170
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0004726174293673612,
      "loss": 1.2787,
      "step": 180
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0004695553933425571,
      "loss": 1.2421,
      "step": 190
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0004663421298692557,
      "loss": 1.2788,
      "step": 200
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0004629798522095818,
      "loss": 1.2663,
      "step": 210
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00045947087626506655,
      "loss": 1.2571,
      "step": 220
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0004558176189814786,
      "loss": 1.2537,
      "step": 230
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00045202259668405736,
      "loss": 1.2732,
      "step": 240
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00044808842334429443,
      "loss": 1.2549,
      "step": 250
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00044401780877945763,
      "loss": 1.2792,
      "step": 260
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0004398135567860972,
      "loss": 1.2639,
      "step": 270
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0004354785632088204,
      "loss": 1.2737,
      "step": 280
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00043101581394566405,
      "loss": 1.2784,
      "step": 290
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0004264283828914392,
      "loss": 1.2964,
      "step": 300
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00042171942982046495,
      "loss": 1.2694,
      "step": 310
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00041689219821014885,
      "loss": 1.2566,
      "step": 320
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0004119500130069138,
      "loss": 1.2661,
      "step": 330
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00040689627833601007,
      "loss": 1.2675,
      "step": 340
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0004017344751567892,
      "loss": 1.2792,
      "step": 350
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00039646815886505623,
      "loss": 1.2481,
      "step": 360
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00039110095684415005,
      "loss": 1.2731,
      "step": 370
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00038563656596643986,
      "loss": 1.2482,
      "step": 380
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00038007875004695884,
      "loss": 1.2558,
      "step": 390
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0003744313372509272,
      "loss": 1.2579,
      "step": 400
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00036869821745695255,
      "loss": 1.2306,
      "step": 410
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0003628833395777224,
      "loss": 1.2053,
      "step": 420
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0003569907088400344,
      "loss": 1.2108,
      "step": 430
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0003510243840260384,
      "loss": 1.2271,
      "step": 440
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00034498847467759,
      "loss": 1.2261,
      "step": 450
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0003388871382656408,
      "loss": 1.2163,
      "step": 460
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00033272457732661664,
      "loss": 1.1958,
      "step": 470
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00032650503656775446,
      "loss": 1.2287,
      "step": 480
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0003202327999433924,
      "loss": 1.2047,
      "step": 490
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00031391218770422723,
      "loss": 1.2249,
      "step": 500
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0003075475534215712,
      "loss": 1.2079,
      "step": 510
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00030114328098865734,
      "loss": 1.2218,
      "step": 520
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00029470378160106046,
      "loss": 1.2151,
      "step": 530
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.0002882334907183115,
      "loss": 1.2239,
      "step": 540
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00028173686500879993,
      "loss": 1.2242,
      "step": 550
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.0002752183792800671,
      "loss": 1.2392,
      "step": 560
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00026868252339660607,
      "loss": 1.2022,
      "step": 570
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.0002621337991872901,
      "loss": 1.2065,
      "step": 580
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.0002555767173445598,
      "loss": 1.2032,
      "step": 590
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00024901579431750625,
      "loss": 1.1886,
      "step": 600
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00024245554920098765,
      "loss": 1.1859,
      "step": 610
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00023590050062292472,
      "loss": 1.217,
      "step": 620
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00022935516363191695,
      "loss": 1.1942,
      "step": 630
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00022282404658732484,
      "loss": 1.1988,
      "step": 640
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00021631164805395906,
      "loss": 1.204,
      "step": 650
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00020982245370351643,
      "loss": 1.2348,
      "step": 660
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002033609332248963,
      "loss": 1.201,
      "step": 670
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00019693153724552586,
      "loss": 1.2099,
      "step": 680
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00019053869426581473,
      "loss": 1.2046,
      "step": 690
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00018418680760885027,
      "loss": 1.2134,
      "step": 700
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.0001778802523874349,
      "loss": 1.2203,
      "step": 710
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00017162337249055476,
      "loss": 1.1875,
      "step": 720
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00016542047759135391,
      "loss": 1.2218,
      "step": 730
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001592758401786774,
      "loss": 1.2095,
      "step": 740
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.0001531936926142254,
      "loss": 1.2269,
      "step": 750
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00014717822421734717,
      "loss": 1.2561,
      "step": 760
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00014123357837948175,
      "loss": 1.1988,
      "step": 770
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00013536384971023357,
      "loss": 1.2115,
      "step": 780
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00012957308121704835,
      "loss": 1.1932,
      "step": 790
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00012386526152043188,
      "loss": 1.1921,
      "step": 800
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.0001182443221066303,
      "loss": 1.1749,
      "step": 810
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.0001127141346196639,
      "loss": 1.137,
      "step": 820
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00010727850819457877,
      "loss": 1.1496,
      "step": 830
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.00010194118683375503,
      "loss": 1.172,
      "step": 840
    },
    {
      "epoch": 2.13,
      "learning_rate": 9.670584682807734e-05,
      "loss": 1.1567,
      "step": 850
    },
    {
      "epoch": 2.15,
      "learning_rate": 9.157609422474414e-05,
      "loss": 1.1722,
      "step": 860
    },
    {
      "epoch": 2.18,
      "learning_rate": 8.65554623434604e-05,
      "loss": 1.1579,
      "step": 870
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.164740934272394e-05,
      "loss": 1.1576,
      "step": 880
    },
    {
      "epoch": 2.23,
      "learning_rate": 7.685531583788258e-05,
      "loss": 1.1713,
      "step": 890
    },
    {
      "epoch": 2.25,
      "learning_rate": 7.218248257260127e-05,
      "loss": 1.1557,
      "step": 900
    },
    {
      "epoch": 2.28,
      "learning_rate": 6.763212814534483e-05,
      "loss": 1.1481,
      "step": 910
    },
    {
      "epoch": 2.3,
      "learning_rate": 6.320738679244117e-05,
      "loss": 1.146,
      "step": 920
    },
    {
      "epoch": 2.33,
      "learning_rate": 5.8911306229252086e-05,
      "loss": 1.1391,
      "step": 930
    },
    {
      "epoch": 2.35,
      "learning_rate": 5.4746845550939034e-05,
      "loss": 1.1543,
      "step": 940
    },
    {
      "epoch": 2.38,
      "learning_rate": 5.071687319426946e-05,
      "loss": 1.1709,
      "step": 950
    },
    {
      "epoch": 2.4,
      "learning_rate": 4.682416496186751e-05,
      "loss": 1.1822,
      "step": 960
    },
    {
      "epoch": 2.43,
      "learning_rate": 4.307140211027083e-05,
      "loss": 1.175,
      "step": 970
    },
    {
      "epoch": 2.45,
      "learning_rate": 3.94611695031086e-05,
      "loss": 1.1583,
      "step": 980
    },
    {
      "epoch": 2.48,
      "learning_rate": 3.5995953830674997e-05,
      "loss": 1.129,
      "step": 990
    },
    {
      "epoch": 2.5,
      "learning_rate": 3.267814189712304e-05,
      "loss": 1.1302,
      "step": 1000
    },
    {
      "epoch": 2.53,
      "learning_rate": 2.9510018976458775e-05,
      "loss": 1.1624,
      "step": 1010
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.6493767238468747e-05,
      "loss": 1.1701,
      "step": 1020
    },
    {
      "epoch": 2.58,
      "learning_rate": 2.3631464245664204e-05,
      "loss": 1.1586,
      "step": 1030
    },
    {
      "epoch": 2.6,
      "learning_rate": 2.0925081522278068e-05,
      "loss": 1.1663,
      "step": 1040
    },
    {
      "epoch": 2.63,
      "learning_rate": 1.837648319629956e-05,
      "loss": 1.1558,
      "step": 1050
    },
    {
      "epoch": 2.65,
      "learning_rate": 1.5987424715482508e-05,
      "loss": 1.1654,
      "step": 1060
    },
    {
      "epoch": 2.68,
      "learning_rate": 1.3759551638211298e-05,
      "loss": 1.1803,
      "step": 1070
    },
    {
      "epoch": 2.7,
      "learning_rate": 1.1694398500057712e-05,
      "loss": 1.1491,
      "step": 1080
    },
    {
      "epoch": 2.73,
      "learning_rate": 9.793387756808796e-06,
      "loss": 1.1729,
      "step": 1090
    },
    {
      "epoch": 2.75,
      "learning_rate": 8.057828804694301e-06,
      "loss": 1.1561,
      "step": 1100
    },
    {
      "epoch": 2.78,
      "learning_rate": 6.4889170784883655e-06,
      "loss": 1.1744,
      "step": 1110
    },
    {
      "epoch": 2.8,
      "learning_rate": 5.087733228106517e-06,
      "loss": 1.1827,
      "step": 1120
    },
    {
      "epoch": 2.83,
      "learning_rate": 3.855242374265433e-06,
      "loss": 1.1443,
      "step": 1130
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.7922934437178693e-06,
      "loss": 1.1704,
      "step": 1140
    },
    {
      "epoch": 2.88,
      "learning_rate": 1.899618584520868e-06,
      "loss": 1.1619,
      "step": 1150
    },
    {
      "epoch": 2.9,
      "learning_rate": 1.1778326617398473e-06,
      "loss": 1.1543,
      "step": 1160
    },
    {
      "epoch": 2.93,
      "learning_rate": 6.274328339360702e-07,
      "loss": 1.1593,
      "step": 1170
    },
    {
      "epoch": 2.95,
      "learning_rate": 2.4879821072892884e-07,
      "loss": 1.1839,
      "step": 1180
    },
    {
      "epoch": 2.98,
      "learning_rate": 4.218959166932268e-08,
      "loss": 1.1736,
      "step": 1190
    },
    {
      "epoch": 2.99,
      "step": 1197,
      "total_flos": 2.0750632108299387e+18,
      "train_loss": 1.2194481684749288,
      "train_runtime": 3526.7352,
      "train_samples_per_second": 43.511,
      "train_steps_per_second": 0.339
    }
  ],
  "max_steps": 1197,
  "num_train_epochs": 3,
  "total_flos": 2.0750632108299387e+18,
  "trial_name": null,
  "trial_params": null
}