train: run qwen single once
This commit is contained in:
parent
cf1107fbfa
commit
429c1cd574
|
@ -0,0 +1,380 @@
|
|||
08/21/2024 05:58:22 - INFO - llamafactory.hparams.parser - Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, compute dtype: torch.bfloat16
|
||||
08/21/2024 05:58:23 - INFO - llamafactory.data.template - Add eos token: <|im_end|>
|
||||
08/21/2024 05:58:23 - INFO - llamafactory.data.template - Add pad token: <|im_end|>
|
||||
08/21/2024 05:58:23 - INFO - llamafactory.data.loader - Loading dataset AI-ModelScope/train_1M_CN...
|
||||
training example:
|
||||
input_ids:
|
||||
[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 104317, 89012, 22382, 106096, 64471, 101137, 72881, 102648, 46448, 1773, 62244, 107132, 37945, 99553, 25177, 101898, 8997, 100431, 99639, 113773, 9370, 111749, 25, 330, 100012, 105435, 99487, 100220, 3837, 104817, 44063, 99553, 102322, 20074, 33108, 116993, 3837, 23031, 104022, 100147, 101313, 1773, 698, 151645, 198, 151644, 77091, 198, 99487, 111749, 101137, 72881, 102648, 46448, 1773, 151645]
|
||||
inputs:
|
||||
<|im_start|>system
|
||||
You are a helpful assistant.<|im_end|>
|
||||
<|im_start|>user
|
||||
判断给定的文章是否符合语法规则。如果不符合,请提供修改建议。
|
||||
下面是一篇文章的开头: "为了探讨这个主题,本文将提供一系列数据和实例,以证明这一观点。"
|
||||
<|im_end|>
|
||||
<|im_start|>assistant
|
||||
这个开头符合语法规则。<|im_end|>
|
||||
label_ids:
|
||||
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 99487, 111749, 101137, 72881, 102648, 46448, 1773, 151645]
|
||||
labels:
|
||||
这个开头符合语法规则。<|im_end|>
|
||||
08/21/2024 05:59:58 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
||||
08/21/2024 05:59:58 - INFO - llamafactory.model.model_utils.attention - Using vanilla attention implementation.
|
||||
08/21/2024 05:59:58 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
||||
08/21/2024 05:59:58 - INFO - llamafactory.model.adapter - Fine-tuning method: LoRA
|
||||
08/21/2024 05:59:58 - INFO - llamafactory.model.model_utils.misc - Found linear modules: c_attn,w1,c_proj,w2
|
||||
08/21/2024 05:59:58 - INFO - llamafactory.model.loader - trainable params: 17,891,328 || all params: 7,739,215,872 || trainable%: 0.2312
|
||||
{'loss': 1.5198, 'grad_norm': 0.9780434966087341, 'learning_rate': 3e-06, 'epoch': 0.01, 'num_input_tokens_seen': 9808}
|
||||
{'loss': 1.5584, 'grad_norm': 1.3359601497650146, 'learning_rate': 6e-06, 'epoch': 0.01, 'num_input_tokens_seen': 19312}
|
||||
{'loss': 1.5898, 'grad_norm': 1.0262835025787354, 'learning_rate': 9e-06, 'epoch': 0.02, 'num_input_tokens_seen': 29232}
|
||||
{'loss': 1.8087, 'grad_norm': 0.6915128827095032, 'learning_rate': 1.2e-05, 'epoch': 0.02, 'num_input_tokens_seen': 37984}
|
||||
{'loss': 1.715, 'grad_norm': 0.7955412864685059, 'learning_rate': 1.5e-05, 'epoch': 0.03, 'num_input_tokens_seen': 44592}
|
||||
{'loss': 1.7853, 'grad_norm': 3.3742246627807617, 'learning_rate': 1.8e-05, 'epoch': 0.03, 'num_input_tokens_seen': 52400}
|
||||
{'loss': 1.5861, 'grad_norm': 0.7684027552604675, 'learning_rate': 2.1e-05, 'epoch': 0.04, 'num_input_tokens_seen': 60320}
|
||||
{'loss': 2.0417, 'grad_norm': 2.6426279544830322, 'learning_rate': 2.4e-05, 'epoch': 0.04, 'num_input_tokens_seen': 67024}
|
||||
{'loss': 1.6384, 'grad_norm': 2.341212749481201, 'learning_rate': 2.7000000000000002e-05, 'epoch': 0.05, 'num_input_tokens_seen': 73776}
|
||||
{'loss': 1.7021, 'grad_norm': 3.2077174186706543, 'learning_rate': 3e-05, 'epoch': 0.05, 'num_input_tokens_seen': 82592}
|
||||
{'loss': 1.6026, 'grad_norm': 3.0830605030059814, 'learning_rate': 3.3e-05, 'epoch': 0.06, 'num_input_tokens_seen': 90512}
|
||||
{'loss': 1.5229, 'grad_norm': 1.161834478378296, 'learning_rate': 3.6e-05, 'epoch': 0.06, 'num_input_tokens_seen': 96848}
|
||||
{'loss': 1.3826, 'grad_norm': 2.893949508666992, 'learning_rate': 3.9000000000000006e-05, 'epoch': 0.07, 'num_input_tokens_seen': 103728}
|
||||
{'loss': 1.348, 'grad_norm': 0.8465118408203125, 'learning_rate': 4.2e-05, 'epoch': 0.07, 'num_input_tokens_seen': 112160}
|
||||
{'loss': 1.5667, 'grad_norm': 1.7374404668807983, 'learning_rate': 4.5e-05, 'epoch': 0.08, 'num_input_tokens_seen': 117984}
|
||||
{'loss': 1.3849, 'grad_norm': 0.7276745438575745, 'learning_rate': 4.8e-05, 'epoch': 0.09, 'num_input_tokens_seen': 126624}
|
||||
{'loss': 1.578, 'grad_norm': 3.7653863430023193, 'learning_rate': 5.1000000000000006e-05, 'epoch': 0.09, 'num_input_tokens_seen': 134496}
|
||||
{'loss': 1.6441, 'grad_norm': 1.4671003818511963, 'learning_rate': 5.4000000000000005e-05, 'epoch': 0.1, 'num_input_tokens_seen': 141664}
|
||||
{'loss': 1.3386, 'grad_norm': 0.5093861222267151, 'learning_rate': 5.6999999999999996e-05, 'epoch': 0.1, 'num_input_tokens_seen': 149360}
|
||||
{'loss': 1.3779, 'grad_norm': 0.6295309066772461, 'learning_rate': 6e-05, 'epoch': 0.11, 'num_input_tokens_seen': 157776}
|
||||
{'loss': 1.4769, 'grad_norm': 1.3480464220046997, 'learning_rate': 6.3e-05, 'epoch': 0.11, 'num_input_tokens_seen': 165360}
|
||||
{'loss': 1.396, 'grad_norm': 1.3322535753250122, 'learning_rate': 6.6e-05, 'epoch': 0.12, 'num_input_tokens_seen': 173152}
|
||||
{'loss': 1.5464, 'grad_norm': 0.70087730884552, 'learning_rate': 6.9e-05, 'epoch': 0.12, 'num_input_tokens_seen': 181504}
|
||||
{'loss': 1.4307, 'grad_norm': 1.7856206893920898, 'learning_rate': 7.2e-05, 'epoch': 0.13, 'num_input_tokens_seen': 188880}
|
||||
{'loss': 1.4183, 'grad_norm': 0.5499105453491211, 'learning_rate': 7.500000000000001e-05, 'epoch': 0.13, 'num_input_tokens_seen': 195952}
|
||||
{'loss': 1.2132, 'grad_norm': 0.8403244614601135, 'learning_rate': 7.800000000000001e-05, 'epoch': 0.14, 'num_input_tokens_seen': 203744}
|
||||
{'loss': 1.1731, 'grad_norm': 0.43172359466552734, 'learning_rate': 8.1e-05, 'epoch': 0.14, 'num_input_tokens_seen': 212624}
|
||||
{'loss': 1.528, 'grad_norm': 2.4017887115478516, 'learning_rate': 8.4e-05, 'epoch': 0.15, 'num_input_tokens_seen': 219968}
|
||||
{'loss': 1.275, 'grad_norm': 0.5825865864753723, 'learning_rate': 8.7e-05, 'epoch': 0.15, 'num_input_tokens_seen': 229024}
|
||||
{'loss': 1.2145, 'grad_norm': 0.6666032075881958, 'learning_rate': 9e-05, 'epoch': 0.16, 'num_input_tokens_seen': 236768}
|
||||
{'loss': 1.3413, 'grad_norm': 0.8629510998725891, 'learning_rate': 9.300000000000001e-05, 'epoch': 0.17, 'num_input_tokens_seen': 245152}
|
||||
{'loss': 1.5684, 'grad_norm': 0.9067161679267883, 'learning_rate': 9.6e-05, 'epoch': 0.17, 'num_input_tokens_seen': 251728}
|
||||
{'loss': 1.3223, 'grad_norm': 0.6995197534561157, 'learning_rate': 9.900000000000001e-05, 'epoch': 0.18, 'num_input_tokens_seen': 260832}
|
||||
{'loss': 1.3921, 'grad_norm': 0.9108286499977112, 'learning_rate': 9.999878153526974e-05, 'epoch': 0.18, 'num_input_tokens_seen': 268448}
|
||||
{'loss': 1.258, 'grad_norm': 0.7171559929847717, 'learning_rate': 9.999238475781957e-05, 'epoch': 0.19, 'num_input_tokens_seen': 276016}
|
||||
{'loss': 1.3191, 'grad_norm': 0.8344643115997314, 'learning_rate': 9.998050575201771e-05, 'epoch': 0.19, 'num_input_tokens_seen': 282416}
|
||||
{'loss': 1.4481, 'grad_norm': 0.7680601477622986, 'learning_rate': 9.996314582053106e-05, 'epoch': 0.2, 'num_input_tokens_seen': 290016}
|
||||
{'loss': 1.5764, 'grad_norm': 1.1341261863708496, 'learning_rate': 9.99403068670717e-05, 'epoch': 0.2, 'num_input_tokens_seen': 297952}
|
||||
{'loss': 1.3542, 'grad_norm': 0.5555967092514038, 'learning_rate': 9.991199139618827e-05, 'epoch': 0.21, 'num_input_tokens_seen': 305152}
|
||||
{'loss': 1.2433, 'grad_norm': 0.7675139904022217, 'learning_rate': 9.987820251299122e-05, 'epoch': 0.21, 'num_input_tokens_seen': 312592}
|
||||
{'loss': 1.2361, 'grad_norm': 0.8082228302955627, 'learning_rate': 9.983894392281237e-05, 'epoch': 0.22, 'num_input_tokens_seen': 319792}
|
||||
{'loss': 1.4045, 'grad_norm': 0.3689567744731903, 'learning_rate': 9.979421993079852e-05, 'epoch': 0.22, 'num_input_tokens_seen': 327776}
|
||||
{'loss': 1.317, 'grad_norm': 3.3040738105773926, 'learning_rate': 9.974403544143941e-05, 'epoch': 0.23, 'num_input_tokens_seen': 336432}
|
||||
{'loss': 1.3257, 'grad_norm': 0.7294110059738159, 'learning_rate': 9.968839595802982e-05, 'epoch': 0.23, 'num_input_tokens_seen': 343216}
|
||||
{'loss': 1.2573, 'grad_norm': 0.9253833889961243, 'learning_rate': 9.962730758206611e-05, 'epoch': 0.24, 'num_input_tokens_seen': 350096}
|
||||
{'loss': 1.3345, 'grad_norm': 0.7265059947967529, 'learning_rate': 9.956077701257709e-05, 'epoch': 0.25, 'num_input_tokens_seen': 359040}
|
||||
{'loss': 1.2392, 'grad_norm': 12.522910118103027, 'learning_rate': 9.948881154538945e-05, 'epoch': 0.25, 'num_input_tokens_seen': 367568}
|
||||
{'loss': 1.2553, 'grad_norm': 0.8411867618560791, 'learning_rate': 9.941141907232765e-05, 'epoch': 0.26, 'num_input_tokens_seen': 375552}
|
||||
{'loss': 1.3964, 'grad_norm': 0.8228245377540588, 'learning_rate': 9.932860808034848e-05, 'epoch': 0.26, 'num_input_tokens_seen': 385712}
|
||||
{'loss': 1.1754, 'grad_norm': 0.7854481339454651, 'learning_rate': 9.924038765061042e-05, 'epoch': 0.27, 'num_input_tokens_seen': 393344}
|
||||
{'loss': 1.3174, 'grad_norm': 0.5448501110076904, 'learning_rate': 9.914676745747772e-05, 'epoch': 0.27, 'num_input_tokens_seen': 400720}
|
||||
{'loss': 1.2322, 'grad_norm': 0.6475867629051208, 'learning_rate': 9.904775776745958e-05, 'epoch': 0.28, 'num_input_tokens_seen': 409056}
|
||||
{'loss': 1.3971, 'grad_norm': 1.087993860244751, 'learning_rate': 9.894336943808426e-05, 'epoch': 0.28, 'num_input_tokens_seen': 416144}
|
||||
{'loss': 1.1801, 'grad_norm': 1.873576045036316, 'learning_rate': 9.88336139167084e-05, 'epoch': 0.29, 'num_input_tokens_seen': 422880}
|
||||
{'loss': 1.2315, 'grad_norm': 1.1034228801727295, 'learning_rate': 9.871850323926177e-05, 'epoch': 0.29, 'num_input_tokens_seen': 429440}
|
||||
{'loss': 1.2711, 'grad_norm': 0.5939464569091797, 'learning_rate': 9.859805002892732e-05, 'epoch': 0.3, 'num_input_tokens_seen': 437200}
|
||||
{'loss': 1.3031, 'grad_norm': 0.6036504507064819, 'learning_rate': 9.847226749475695e-05, 'epoch': 0.3, 'num_input_tokens_seen': 446416}
|
||||
{'loss': 1.4374, 'grad_norm': 0.9793163537979126, 'learning_rate': 9.834116943022298e-05, 'epoch': 0.31, 'num_input_tokens_seen': 454672}
|
||||
{'loss': 1.2568, 'grad_norm': 4.65339994430542, 'learning_rate': 9.820477021170551e-05, 'epoch': 0.31, 'num_input_tokens_seen': 463216}
|
||||
{'loss': 1.273, 'grad_norm': 0.7815749049186707, 'learning_rate': 9.806308479691595e-05, 'epoch': 0.32, 'num_input_tokens_seen': 472432}
|
||||
{'loss': 1.2393, 'grad_norm': 6.526480674743652, 'learning_rate': 9.791612872325667e-05, 'epoch': 0.33, 'num_input_tokens_seen': 479968}
|
||||
{'loss': 1.4561, 'grad_norm': 7.833691120147705, 'learning_rate': 9.776391810611718e-05, 'epoch': 0.33, 'num_input_tokens_seen': 488112}
|
||||
{'loss': 1.3804, 'grad_norm': 0.9743382930755615, 'learning_rate': 9.760646963710694e-05, 'epoch': 0.34, 'num_input_tokens_seen': 495808}
|
||||
{'loss': 1.3654, 'grad_norm': 1.1837691068649292, 'learning_rate': 9.744380058222483e-05, 'epoch': 0.34, 'num_input_tokens_seen': 503216}
|
||||
{'loss': 1.2941, 'grad_norm': 1.4183355569839478, 'learning_rate': 9.727592877996585e-05, 'epoch': 0.35, 'num_input_tokens_seen': 511568}
|
||||
{'loss': 1.5844, 'grad_norm': 0.6351439952850342, 'learning_rate': 9.710287263936484e-05, 'epoch': 0.35, 'num_input_tokens_seen': 519536}
|
||||
{'loss': 1.5176, 'grad_norm': 0.688452422618866, 'learning_rate': 9.69246511379778e-05, 'epoch': 0.36, 'num_input_tokens_seen': 526576}
|
||||
{'loss': 1.3372, 'grad_norm': 0.6690706014633179, 'learning_rate': 9.674128381980072e-05, 'epoch': 0.36, 'num_input_tokens_seen': 533824}
|
||||
{'loss': 1.2416, 'grad_norm': 0.40040481090545654, 'learning_rate': 9.655279079312642e-05, 'epoch': 0.37, 'num_input_tokens_seen': 541472}
|
||||
{'loss': 1.2045, 'grad_norm': 0.9019640684127808, 'learning_rate': 9.635919272833938e-05, 'epoch': 0.37, 'num_input_tokens_seen': 550144}
|
||||
{'loss': 1.371, 'grad_norm': 0.48580634593963623, 'learning_rate': 9.616051085564906e-05, 'epoch': 0.38, 'num_input_tokens_seen': 557872}
|
||||
{'loss': 1.2994, 'grad_norm': 0.7821425199508667, 'learning_rate': 9.595676696276172e-05, 'epoch': 0.38, 'num_input_tokens_seen': 563728}
|
||||
{'loss': 1.3101, 'grad_norm': 3.1828205585479736, 'learning_rate': 9.574798339249125e-05, 'epoch': 0.39, 'num_input_tokens_seen': 571056}
|
||||
{'loss': 1.1064, 'grad_norm': 0.765829861164093, 'learning_rate': 9.553418304030886e-05, 'epoch': 0.39, 'num_input_tokens_seen': 579184}
|
||||
{'loss': 1.4372, 'grad_norm': 1.0452457666397095, 'learning_rate': 9.53153893518325e-05, 'epoch': 0.4, 'num_input_tokens_seen': 586128}
|
||||
{'loss': 1.4349, 'grad_norm': 0.5598356127738953, 'learning_rate': 9.50916263202557e-05, 'epoch': 0.41, 'num_input_tokens_seen': 594848}
|
||||
{'loss': 1.3169, 'grad_norm': 0.3974337875843048, 'learning_rate': 9.486291848371643e-05, 'epoch': 0.41, 'num_input_tokens_seen': 602544}
|
||||
{'loss': 1.3912, 'grad_norm': 0.5899786353111267, 'learning_rate': 9.462929092260628e-05, 'epoch': 0.42, 'num_input_tokens_seen': 609904}
|
||||
{'loss': 1.1353, 'grad_norm': 0.4491591453552246, 'learning_rate': 9.439076925682006e-05, 'epoch': 0.42, 'num_input_tokens_seen': 618576}
|
||||
{'loss': 1.3647, 'grad_norm': 0.4541083872318268, 'learning_rate': 9.414737964294636e-05, 'epoch': 0.43, 'num_input_tokens_seen': 626960}
|
||||
{'loss': 1.359, 'grad_norm': 0.5013852715492249, 'learning_rate': 9.389914877139903e-05, 'epoch': 0.43, 'num_input_tokens_seen': 634896}
|
||||
{'loss': 1.2965, 'grad_norm': 0.41662266850471497, 'learning_rate': 9.364610386349049e-05, 'epoch': 0.44, 'num_input_tokens_seen': 642640}
|
||||
{'loss': 1.2079, 'grad_norm': 0.6631969809532166, 'learning_rate': 9.338827266844644e-05, 'epoch': 0.44, 'num_input_tokens_seen': 650816}
|
||||
{'loss': 1.2893, 'grad_norm': 0.587091326713562, 'learning_rate': 9.312568346036288e-05, 'epoch': 0.45, 'num_input_tokens_seen': 658736}
|
||||
{'loss': 1.3256, 'grad_norm': 0.6780284643173218, 'learning_rate': 9.285836503510562e-05, 'epoch': 0.45, 'num_input_tokens_seen': 666416}
|
||||
{'loss': 1.3449, 'grad_norm': 0.7384177446365356, 'learning_rate': 9.258634670715238e-05, 'epoch': 0.46, 'num_input_tokens_seen': 673312}
|
||||
{'loss': 1.3915, 'grad_norm': 1.2880659103393555, 'learning_rate': 9.230965830637821e-05, 'epoch': 0.46, 'num_input_tokens_seen': 681408}
|
||||
{'loss': 1.2321, 'grad_norm': 0.5819048285484314, 'learning_rate': 9.202833017478422e-05, 'epoch': 0.47, 'num_input_tokens_seen': 690608}
|
||||
{'loss': 1.3235, 'grad_norm': 0.4322358965873718, 'learning_rate': 9.174239316317033e-05, 'epoch': 0.47, 'num_input_tokens_seen': 698576}
|
||||
{'loss': 1.1704, 'grad_norm': 0.5658558011054993, 'learning_rate': 9.145187862775209e-05, 'epoch': 0.48, 'num_input_tokens_seen': 707808}
|
||||
{'loss': 1.234, 'grad_norm': 3.7345409393310547, 'learning_rate': 9.11568184267221e-05, 'epoch': 0.49, 'num_input_tokens_seen': 714016}
|
||||
{'loss': 1.2134, 'grad_norm': 0.3638668656349182, 'learning_rate': 9.085724491675642e-05, 'epoch': 0.49, 'num_input_tokens_seen': 721552}
|
||||
{'loss': 1.3759, 'grad_norm': 0.7395142316818237, 'learning_rate': 9.055319094946633e-05, 'epoch': 0.5, 'num_input_tokens_seen': 729104}
|
||||
{'loss': 1.3744, 'grad_norm': 0.4629284739494324, 'learning_rate': 9.02446898677957e-05, 'epoch': 0.5, 'num_input_tokens_seen': 736496}
|
||||
{'loss': 1.242, 'grad_norm': 0.5532721877098083, 'learning_rate': 8.993177550236464e-05, 'epoch': 0.51, 'num_input_tokens_seen': 745184}
|
||||
{'loss': 1.2122, 'grad_norm': 0.5109195709228516, 'learning_rate': 8.961448216775954e-05, 'epoch': 0.51, 'num_input_tokens_seen': 752048}
|
||||
{'loss': 1.2173, 'grad_norm': 10.432944297790527, 'learning_rate': 8.92928446587701e-05, 'epoch': 0.52, 'num_input_tokens_seen': 761088}
|
||||
{'loss': 1.2942, 'grad_norm': 0.9729847311973572, 'learning_rate': 8.896689824657372e-05, 'epoch': 0.52, 'num_input_tokens_seen': 768080}
|
||||
{'loss': 1.2633, 'grad_norm': 0.46336349844932556, 'learning_rate': 8.863667867486756e-05, 'epoch': 0.53, 'num_input_tokens_seen': 777808}
|
||||
{'loss': 1.3964, 'grad_norm': 0.5860999226570129, 'learning_rate': 8.83022221559489e-05, 'epoch': 0.53, 'num_input_tokens_seen': 786032}
|
||||
{'loss': 1.2115, 'grad_norm': 0.4330557882785797, 'learning_rate': 8.796356536674403e-05, 'epoch': 0.54, 'num_input_tokens_seen': 793424}
|
||||
{'loss': 1.1753, 'grad_norm': 0.5750731825828552, 'learning_rate': 8.762074544478623e-05, 'epoch': 0.54, 'num_input_tokens_seen': 801392}
|
||||
{'loss': 1.1468, 'grad_norm': 0.5364103317260742, 'learning_rate': 8.727379998414311e-05, 'epoch': 0.55, 'num_input_tokens_seen': 809184}
|
||||
{'loss': 1.359, 'grad_norm': 0.6141430735588074, 'learning_rate': 8.692276703129421e-05, 'epoch': 0.55, 'num_input_tokens_seen': 816896}
|
||||
{'loss': 1.3192, 'grad_norm': 0.3196038007736206, 'learning_rate': 8.656768508095853e-05, 'epoch': 0.56, 'num_input_tokens_seen': 825952}
|
||||
{'loss': 1.2414, 'grad_norm': 0.43610236048698425, 'learning_rate': 8.620859307187339e-05, 'epoch': 0.57, 'num_input_tokens_seen': 835664}
|
||||
{'loss': 1.2118, 'grad_norm': 0.47038909792900085, 'learning_rate': 8.584553038252414e-05, 'epoch': 0.57, 'num_input_tokens_seen': 844288}
|
||||
{'loss': 1.3021, 'grad_norm': 0.42498013377189636, 'learning_rate': 8.547853682682604e-05, 'epoch': 0.58, 'num_input_tokens_seen': 851584}
|
||||
{'loss': 1.3737, 'grad_norm': 0.6150634288787842, 'learning_rate': 8.510765264975813e-05, 'epoch': 0.58, 'num_input_tokens_seen': 861216}
|
||||
{'loss': 1.3771, 'grad_norm': 0.3102297782897949, 'learning_rate': 8.473291852294987e-05, 'epoch': 0.59, 'num_input_tokens_seen': 869792}
|
||||
{'loss': 1.1644, 'grad_norm': 0.35045167803764343, 'learning_rate': 8.435437554022115e-05, 'epoch': 0.59, 'num_input_tokens_seen': 877216}
|
||||
{'loss': 1.2409, 'grad_norm': 0.551223874092102, 'learning_rate': 8.397206521307584e-05, 'epoch': 0.6, 'num_input_tokens_seen': 883488}
|
||||
{'loss': 1.1997, 'grad_norm': 0.717923104763031, 'learning_rate': 8.358602946614951e-05, 'epoch': 0.6, 'num_input_tokens_seen': 890816}
|
||||
{'loss': 1.2954, 'grad_norm': 0.3854019045829773, 'learning_rate': 8.319631063261209e-05, 'epoch': 0.61, 'num_input_tokens_seen': 900352}
|
||||
{'loss': 1.187, 'grad_norm': 0.3597458600997925, 'learning_rate': 8.280295144952536e-05, 'epoch': 0.61, 'num_input_tokens_seen': 909312}
|
||||
{'loss': 1.1707, 'grad_norm': 0.36952605843544006, 'learning_rate': 8.240599505315655e-05, 'epoch': 0.62, 'num_input_tokens_seen': 917168}
|
||||
{'loss': 1.2333, 'grad_norm': 0.4055494964122772, 'learning_rate': 8.200548497424778e-05, 'epoch': 0.62, 'num_input_tokens_seen': 923424}
|
||||
{'loss': 1.3041, 'grad_norm': 0.8989991545677185, 'learning_rate': 8.160146513324254e-05, 'epoch': 0.63, 'num_input_tokens_seen': 931152}
|
||||
{'loss': 1.23, 'grad_norm': 0.4867976903915405, 'learning_rate': 8.119397983546932e-05, 'epoch': 0.63, 'num_input_tokens_seen': 940992}
|
||||
{'loss': 1.3671, 'grad_norm': 0.3175974190235138, 'learning_rate': 8.07830737662829e-05, 'epoch': 0.64, 'num_input_tokens_seen': 949936}
|
||||
{'loss': 1.3496, 'grad_norm': 0.45579057931900024, 'learning_rate': 8.036879198616434e-05, 'epoch': 0.65, 'num_input_tokens_seen': 958432}
|
||||
{'loss': 1.1982, 'grad_norm': 0.42308157682418823, 'learning_rate': 7.99511799257793e-05, 'epoch': 0.65, 'num_input_tokens_seen': 966688}
|
||||
{'loss': 1.1809, 'grad_norm': 0.5646951198577881, 'learning_rate': 7.953028338099627e-05, 'epoch': 0.66, 'num_input_tokens_seen': 973872}
|
||||
{'loss': 1.3528, 'grad_norm': 0.7943840622901917, 'learning_rate': 7.910614850786448e-05, 'epoch': 0.66, 'num_input_tokens_seen': 981264}
|
||||
{'loss': 1.4088, 'grad_norm': 0.46222275495529175, 'learning_rate': 7.86788218175523e-05, 'epoch': 0.67, 'num_input_tokens_seen': 988800}
|
||||
{'loss': 1.4252, 'grad_norm': 0.4380893409252167, 'learning_rate': 7.82483501712469e-05, 'epoch': 0.67, 'num_input_tokens_seen': 996656}
|
||||
{'loss': 1.2573, 'grad_norm': 0.62964928150177, 'learning_rate': 7.781478077501525e-05, 'epoch': 0.68, 'num_input_tokens_seen': 1004672}
|
||||
{'loss': 1.2001, 'grad_norm': 0.4584154784679413, 'learning_rate': 7.737816117462752e-05, 'epoch': 0.68, 'num_input_tokens_seen': 1012400}
|
||||
{'loss': 1.2168, 'grad_norm': 0.540849506855011, 'learning_rate': 7.693853925034315e-05, 'epoch': 0.69, 'num_input_tokens_seen': 1018768}
|
||||
{'loss': 1.3135, 'grad_norm': 0.810152530670166, 'learning_rate': 7.649596321166024e-05, 'epoch': 0.69, 'num_input_tokens_seen': 1026368}
|
||||
{'loss': 1.3444, 'grad_norm': 0.5237188935279846, 'learning_rate': 7.605048159202883e-05, 'epoch': 0.7, 'num_input_tokens_seen': 1033664}
|
||||
{'loss': 1.2206, 'grad_norm': 0.6217949986457825, 'learning_rate': 7.560214324352858e-05, 'epoch': 0.7, 'num_input_tokens_seen': 1042560}
|
||||
{'loss': 1.306, 'grad_norm': 0.5185115337371826, 'learning_rate': 7.515099733151177e-05, 'epoch': 0.71, 'num_input_tokens_seen': 1050336}
|
||||
{'loss': 1.3917, 'grad_norm': 0.6923243999481201, 'learning_rate': 7.469709332921155e-05, 'epoch': 0.71, 'num_input_tokens_seen': 1057824}
|
||||
{'loss': 1.4311, 'grad_norm': 0.6531165242195129, 'learning_rate': 7.424048101231686e-05, 'epoch': 0.72, 'num_input_tokens_seen': 1065184}
|
||||
{'loss': 1.3075, 'grad_norm': 0.4758189022541046, 'learning_rate': 7.378121045351378e-05, 'epoch': 0.73, 'num_input_tokens_seen': 1072016}
|
||||
{'loss': 1.2019, 'grad_norm': 0.7780120968818665, 'learning_rate': 7.331933201699457e-05, 'epoch': 0.73, 'num_input_tokens_seen': 1079584}
|
||||
{'loss': 1.1311, 'grad_norm': 0.5211575627326965, 'learning_rate': 7.285489635293472e-05, 'epoch': 0.74, 'num_input_tokens_seen': 1087984}
|
||||
{'loss': 1.1931, 'grad_norm': 0.48985904455184937, 'learning_rate': 7.238795439193848e-05, 'epoch': 0.74, 'num_input_tokens_seen': 1096032}
|
||||
{'loss': 1.1565, 'grad_norm': 0.3550620973110199, 'learning_rate': 7.191855733945387e-05, 'epoch': 0.75, 'num_input_tokens_seen': 1105696}
|
||||
{'loss': 1.2883, 'grad_norm': 0.5722794532775879, 'learning_rate': 7.14467566701573e-05, 'epoch': 0.75, 'num_input_tokens_seen': 1114048}
|
||||
{'loss': 1.4102, 'grad_norm': 0.474762886762619, 'learning_rate': 7.097260412230886e-05, 'epoch': 0.76, 'num_input_tokens_seen': 1122528}
|
||||
{'loss': 1.2941, 'grad_norm': 0.37182238698005676, 'learning_rate': 7.049615169207864e-05, 'epoch': 0.76, 'num_input_tokens_seen': 1131328}
|
||||
{'loss': 1.155, 'grad_norm': 0.37987005710601807, 'learning_rate': 7.001745162784477e-05, 'epoch': 0.77, 'num_input_tokens_seen': 1138992}
|
||||
{'loss': 1.3493, 'grad_norm': 0.5889148116111755, 'learning_rate': 6.953655642446368e-05, 'epoch': 0.77, 'num_input_tokens_seen': 1147776}
|
||||
{'loss': 1.2435, 'grad_norm': 0.6060062050819397, 'learning_rate': 6.905351881751372e-05, 'epoch': 0.78, 'num_input_tokens_seen': 1157360}
|
||||
{'loss': 1.2651, 'grad_norm': 1.8136910200119019, 'learning_rate': 6.856839177751176e-05, 'epoch': 0.78, 'num_input_tokens_seen': 1166256}
|
||||
{'loss': 1.3279, 'grad_norm': 0.45820876955986023, 'learning_rate': 6.808122850410461e-05, 'epoch': 0.79, 'num_input_tokens_seen': 1173840}
|
||||
{'loss': 1.3869, 'grad_norm': 0.656412661075592, 'learning_rate': 6.759208242023509e-05, 'epoch': 0.79, 'num_input_tokens_seen': 1180848}
|
||||
{'loss': 1.31, 'grad_norm': 0.5562692284584045, 'learning_rate': 6.710100716628344e-05, 'epoch': 0.8, 'num_input_tokens_seen': 1189952}
|
||||
{'loss': 1.225, 'grad_norm': 0.4766385853290558, 'learning_rate': 6.660805659418516e-05, 'epoch': 0.81, 'num_input_tokens_seen': 1198336}
|
||||
{'loss': 1.2687, 'grad_norm': 0.3756507635116577, 'learning_rate': 6.611328476152557e-05, 'epoch': 0.81, 'num_input_tokens_seen': 1206304}
|
||||
{'loss': 1.3772, 'grad_norm': 0.7269827723503113, 'learning_rate': 6.561674592561163e-05, 'epoch': 0.82, 'num_input_tokens_seen': 1214480}
|
||||
{'loss': 1.2164, 'grad_norm': 0.2886042892932892, 'learning_rate': 6.511849453752223e-05, 'epoch': 0.82, 'num_input_tokens_seen': 1222608}
|
||||
{'loss': 1.3669, 'grad_norm': 1.0119291543960571, 'learning_rate': 6.461858523613684e-05, 'epoch': 0.83, 'num_input_tokens_seen': 1229568}
|
||||
{'loss': 1.2246, 'grad_norm': 0.33208441734313965, 'learning_rate': 6.411707284214384e-05, 'epoch': 0.83, 'num_input_tokens_seen': 1236656}
|
||||
{'loss': 1.3515, 'grad_norm': 0.6497739553451538, 'learning_rate': 6.361401235202872e-05, 'epoch': 0.84, 'num_input_tokens_seen': 1243776}
|
||||
{'loss': 1.1246, 'grad_norm': 0.3101985454559326, 'learning_rate': 6.310945893204324e-05, 'epoch': 0.84, 'num_input_tokens_seen': 1253296}
|
||||
{'loss': 1.4245, 'grad_norm': 0.7302997708320618, 'learning_rate': 6.26034679121557e-05, 'epoch': 0.85, 'num_input_tokens_seen': 1261440}
|
||||
{'loss': 1.4892, 'grad_norm': 0.41763970255851746, 'learning_rate': 6.209609477998338e-05, 'epoch': 0.85, 'num_input_tokens_seen': 1268336}
|
||||
{'loss': 1.4348, 'grad_norm': 0.6407574415206909, 'learning_rate': 6.158739517470786e-05, 'epoch': 0.86, 'num_input_tokens_seen': 1276224}
|
||||
{'loss': 1.1242, 'grad_norm': 1.0497456789016724, 'learning_rate': 6.107742488097338e-05, 'epoch': 0.86, 'num_input_tokens_seen': 1284112}
|
||||
{'loss': 1.3794, 'grad_norm': 0.6208534240722656, 'learning_rate': 6.056623982276944e-05, 'epoch': 0.87, 'num_input_tokens_seen': 1292800}
|
||||
{'loss': 1.4105, 'grad_norm': 1.1972308158874512, 'learning_rate': 6.005389605729824e-05, 'epoch': 0.87, 'num_input_tokens_seen': 1300656}
|
||||
{'loss': 1.2852, 'grad_norm': 0.41444703936576843, 'learning_rate': 5.9540449768827246e-05, 'epoch': 0.88, 'num_input_tokens_seen': 1309792}
|
||||
{'loss': 1.2374, 'grad_norm': 0.5988168120384216, 'learning_rate': 5.902595726252801e-05, 'epoch': 0.89, 'num_input_tokens_seen': 1316000}
|
||||
{'eval_loss': 1.275710940361023, 'eval_runtime': 20.8891, 'eval_samples_per_second': 47.872, 'eval_steps_per_second': 23.936, 'epoch': 0.89, 'num_input_tokens_seen': 1321104}
|
||||
{'loss': 1.3322, 'grad_norm': 0.6610145568847656, 'learning_rate': 5.851047495830163e-05, 'epoch': 0.89, 'num_input_tokens_seen': 1323856}
|
||||
{'loss': 1.4102, 'grad_norm': 0.4503127634525299, 'learning_rate': 5.799405938459175e-05, 'epoch': 0.9, 'num_input_tokens_seen': 1332832}
|
||||
{'loss': 1.2225, 'grad_norm': 0.4584588408470154, 'learning_rate': 5.747676717218549e-05, 'epoch': 0.9, 'num_input_tokens_seen': 1339504}
|
||||
{'loss': 1.0999, 'grad_norm': 0.38683104515075684, 'learning_rate': 5.695865504800327e-05, 'epoch': 0.91, 'num_input_tokens_seen': 1347200}
|
||||
{'loss': 1.1401, 'grad_norm': 0.9537116289138794, 'learning_rate': 5.643977982887815e-05, 'epoch': 0.91, 'num_input_tokens_seen': 1354192}
|
||||
{'loss': 1.5087, 'grad_norm': 0.6801053881645203, 'learning_rate': 5.5920198415325064e-05, 'epoch': 0.92, 'num_input_tokens_seen': 1362864}
|
||||
{'loss': 1.3877, 'grad_norm': 0.6392530798912048, 'learning_rate': 5.539996778530115e-05, 'epoch': 0.92, 'num_input_tokens_seen': 1371616}
|
||||
{'loss': 1.2798, 'grad_norm': 0.5301526784896851, 'learning_rate': 5.487914498795747e-05, 'epoch': 0.93, 'num_input_tokens_seen': 1378160}
|
||||
{'loss': 1.2768, 'grad_norm': 0.3706102967262268, 'learning_rate': 5.435778713738292e-05, 'epoch': 0.93, 'num_input_tokens_seen': 1388624}
|
||||
{'loss': 1.2289, 'grad_norm': 0.7535234689712524, 'learning_rate': 5.383595140634093e-05, 'epoch': 0.94, 'num_input_tokens_seen': 1397216}
|
||||
{'loss': 1.1949, 'grad_norm': 1.009157657623291, 'learning_rate': 5.3313695020000024e-05, 'epoch': 0.94, 'num_input_tokens_seen': 1405616}
|
||||
{'loss': 1.0639, 'grad_norm': 1.248437523841858, 'learning_rate': 5.279107524965819e-05, 'epoch': 0.95, 'num_input_tokens_seen': 1413680}
|
||||
{'loss': 1.3181, 'grad_norm': 0.5594481229782104, 'learning_rate': 5.226814940646269e-05, 'epoch': 0.95, 'num_input_tokens_seen': 1421344}
|
||||
{'loss': 1.2697, 'grad_norm': 0.4510025382041931, 'learning_rate': 5.174497483512506e-05, 'epoch': 0.96, 'num_input_tokens_seen': 1430592}
|
||||
{'loss': 1.2928, 'grad_norm': 0.5174109935760498, 'learning_rate': 5.1221608907632665e-05, 'epoch': 0.97, 'num_input_tokens_seen': 1437920}
|
||||
{'loss': 1.3658, 'grad_norm': 0.6793380379676819, 'learning_rate': 5.0698109016957274e-05, 'epoch': 0.97, 'num_input_tokens_seen': 1445824}
|
||||
{'loss': 1.1596, 'grad_norm': 0.3408694863319397, 'learning_rate': 5.017453257076119e-05, 'epoch': 0.98, 'num_input_tokens_seen': 1453936}
|
||||
{'loss': 1.3295, 'grad_norm': 0.5417718887329102, 'learning_rate': 4.965093698510193e-05, 'epoch': 0.98, 'num_input_tokens_seen': 1461840}
|
||||
{'loss': 1.1869, 'grad_norm': 0.6786752343177795, 'learning_rate': 4.912737967813583e-05, 'epoch': 0.99, 'num_input_tokens_seen': 1469056}
|
||||
{'loss': 1.2946, 'grad_norm': 0.6146776080131531, 'learning_rate': 4.860391806382157e-05, 'epoch': 0.99, 'num_input_tokens_seen': 1476144}
|
||||
{'loss': 1.226, 'grad_norm': 0.5054582953453064, 'learning_rate': 4.8080609545624004e-05, 'epoch': 1.0, 'num_input_tokens_seen': 1483664}
|
||||
{'loss': 1.3204, 'grad_norm': 0.33809226751327515, 'learning_rate': 4.755751151021934e-05, 'epoch': 1.0, 'num_input_tokens_seen': 1492768}
|
||||
{'loss': 1.2531, 'grad_norm': 0.45875921845436096, 'learning_rate': 4.703468132120193e-05, 'epoch': 1.01, 'num_input_tokens_seen': 1499904}
|
||||
{'loss': 1.3849, 'grad_norm': 0.4520933926105499, 'learning_rate': 4.6512176312793736e-05, 'epoch': 1.01, 'num_input_tokens_seen': 1508704}
|
||||
{'loss': 1.1251, 'grad_norm': 0.5095769762992859, 'learning_rate': 4.599005378355706e-05, 'epoch': 1.02, 'num_input_tokens_seen': 1517328}
|
||||
{'loss': 1.0598, 'grad_norm': 0.4004427492618561, 'learning_rate': 4.5468370990111006e-05, 'epoch': 1.02, 'num_input_tokens_seen': 1524384}
|
||||
{'loss': 1.3841, 'grad_norm': 0.5377769470214844, 'learning_rate': 4.494718514085268e-05, 'epoch': 1.03, 'num_input_tokens_seen': 1532032}
|
||||
{'loss': 1.2045, 'grad_norm': 0.3413529396057129, 'learning_rate': 4.442655338968373e-05, 'epoch': 1.03, 'num_input_tokens_seen': 1542432}
|
||||
{'loss': 1.1681, 'grad_norm': 0.4114669859409332, 'learning_rate': 4.390653282974264e-05, 'epoch': 1.04, 'num_input_tokens_seen': 1549104}
|
||||
{'loss': 1.1828, 'grad_norm': 0.3440811336040497, 'learning_rate': 4.3387180487143876e-05, 'epoch': 1.05, 'num_input_tokens_seen': 1558432}
|
||||
{'loss': 1.3796, 'grad_norm': 0.8090496063232422, 'learning_rate': 4.2868553314724425e-05, 'epoch': 1.05, 'num_input_tokens_seen': 1565360}
|
||||
{'loss': 1.3221, 'grad_norm': 0.5015954971313477, 'learning_rate': 4.23507081857981e-05, 'epoch': 1.06, 'num_input_tokens_seen': 1573200}
|
||||
{'loss': 1.2565, 'grad_norm': 0.28652992844581604, 'learning_rate': 4.1833701887918904e-05, 'epoch': 1.06, 'num_input_tokens_seen': 1581824}
|
||||
{'loss': 1.1947, 'grad_norm': 0.538189709186554, 'learning_rate': 4.131759111665349e-05, 'epoch': 1.07, 'num_input_tokens_seen': 1589088}
|
||||
{'loss': 1.3702, 'grad_norm': 2.3863205909729004, 'learning_rate': 4.080243246936399e-05, 'epoch': 1.07, 'num_input_tokens_seen': 1596080}
|
||||
{'loss': 1.182, 'grad_norm': 0.6321094036102295, 'learning_rate': 4.028828243900141e-05, 'epoch': 1.08, 'num_input_tokens_seen': 1602928}
|
||||
{'loss': 1.3002, 'grad_norm': 0.47681689262390137, 'learning_rate': 3.9775197407910485e-05, 'epoch': 1.08, 'num_input_tokens_seen': 1611552}
|
||||
{'loss': 1.361, 'grad_norm': 0.504429042339325, 'learning_rate': 3.926323364164684e-05, 'epoch': 1.09, 'num_input_tokens_seen': 1621312}
|
||||
{'loss': 1.4355, 'grad_norm': 0.34117618203163147, 'learning_rate': 3.875244728280676e-05, 'epoch': 1.09, 'num_input_tokens_seen': 1630144}
|
||||
{'loss': 1.1478, 'grad_norm': 0.3532378375530243, 'learning_rate': 3.82428943448705e-05, 'epoch': 1.1, 'num_input_tokens_seen': 1638288}
|
||||
{'loss': 1.1475, 'grad_norm': 0.8341960310935974, 'learning_rate': 3.773463070605987e-05, 'epoch': 1.1, 'num_input_tokens_seen': 1644672}
|
||||
{'loss': 1.1921, 'grad_norm': 0.6224629282951355, 'learning_rate': 3.7227712103210486e-05, 'epoch': 1.11, 'num_input_tokens_seen': 1651664}
|
||||
{'loss': 1.1563, 'grad_norm': 1.117788553237915, 'learning_rate': 3.6722194125659556e-05, 'epoch': 1.11, 'num_input_tokens_seen': 1657872}
|
||||
{'loss': 1.2685, 'grad_norm': 0.5773010849952698, 'learning_rate': 3.6218132209150045e-05, 'epoch': 1.12, 'num_input_tokens_seen': 1666688}
|
||||
{'loss': 1.2926, 'grad_norm': 0.3286520540714264, 'learning_rate': 3.5715581629751326e-05, 'epoch': 1.13, 'num_input_tokens_seen': 1675840}
|
||||
{'loss': 1.2786, 'grad_norm': 0.5894845724105835, 'learning_rate': 3.5214597497797684e-05, 'epoch': 1.13, 'num_input_tokens_seen': 1686816}
|
||||
{'loss': 1.2671, 'grad_norm': 0.6024913787841797, 'learning_rate': 3.471523475184472e-05, 'epoch': 1.14, 'num_input_tokens_seen': 1695904}
|
||||
{'loss': 1.2902, 'grad_norm': 1.0095946788787842, 'learning_rate': 3.4217548152644885e-05, 'epoch': 1.14, 'num_input_tokens_seen': 1704112}
|
||||
{'loss': 1.2884, 'grad_norm': 0.5328789949417114, 'learning_rate': 3.372159227714218e-05, 'epoch': 1.15, 'num_input_tokens_seen': 1712240}
|
||||
{'loss': 1.2189, 'grad_norm': 0.4846518933773041, 'learning_rate': 3.322742151248725e-05, 'epoch': 1.15, 'num_input_tokens_seen': 1720368}
|
||||
{'loss': 1.2421, 'grad_norm': 0.6126274466514587, 'learning_rate': 3.273509005007327e-05, 'epoch': 1.16, 'num_input_tokens_seen': 1728752}
|
||||
{'loss': 1.2702, 'grad_norm': 0.5475163459777832, 'learning_rate': 3.224465187959316e-05, 'epoch': 1.16, 'num_input_tokens_seen': 1734816}
|
||||
{'loss': 1.2804, 'grad_norm': 0.4265217185020447, 'learning_rate': 3.1756160783119016e-05, 'epoch': 1.17, 'num_input_tokens_seen': 1744720}
|
||||
{'loss': 1.3908, 'grad_norm': 0.6626904606819153, 'learning_rate': 3.12696703292044e-05, 'epoch': 1.17, 'num_input_tokens_seen': 1751344}
|
||||
{'loss': 1.2881, 'grad_norm': 0.6573454737663269, 'learning_rate': 3.078523386700982e-05, 'epoch': 1.18, 'num_input_tokens_seen': 1757920}
|
||||
{'loss': 1.322, 'grad_norm': 0.6659638285636902, 'learning_rate': 3.0302904520452447e-05, 'epoch': 1.18, 'num_input_tokens_seen': 1765968}
|
||||
{'loss': 1.1882, 'grad_norm': 0.8781021237373352, 'learning_rate': 2.9822735182380496e-05, 'epoch': 1.19, 'num_input_tokens_seen': 1774048}
|
||||
{'loss': 1.2558, 'grad_norm': 0.5075053572654724, 'learning_rate': 2.934477850877292e-05, 'epoch': 1.19, 'num_input_tokens_seen': 1782208}
|
||||
{'loss': 1.3037, 'grad_norm': 0.5382062196731567, 'learning_rate': 2.886908691296504e-05, 'epoch': 1.2, 'num_input_tokens_seen': 1789760}
|
||||
{'loss': 1.2251, 'grad_norm': 0.5306540727615356, 'learning_rate': 2.8395712559900877e-05, 'epoch': 1.21, 'num_input_tokens_seen': 1797856}
|
||||
{'loss': 1.3007, 'grad_norm': 1.034726619720459, 'learning_rate': 2.7924707360412746e-05, 'epoch': 1.21, 'num_input_tokens_seen': 1804208}
|
||||
{'loss': 1.4622, 'grad_norm': 1.0847396850585938, 'learning_rate': 2.7456122965528475e-05, 'epoch': 1.22, 'num_input_tokens_seen': 1811376}
|
||||
{'loss': 1.2426, 'grad_norm': 0.4761122763156891, 'learning_rate': 2.699001076080742e-05, 'epoch': 1.22, 'num_input_tokens_seen': 1821040}
|
||||
{'loss': 1.2246, 'grad_norm': 0.5356981754302979, 'learning_rate': 2.6526421860705473e-05, 'epoch': 1.23, 'num_input_tokens_seen': 1827120}
|
||||
{'loss': 1.3284, 'grad_norm': 0.349098801612854, 'learning_rate': 2.6065407102969664e-05, 'epoch': 1.23, 'num_input_tokens_seen': 1835408}
|
||||
{'loss': 1.2983, 'grad_norm': 0.519724428653717, 'learning_rate': 2.560701704306336e-05, 'epoch': 1.24, 'num_input_tokens_seen': 1842288}
|
||||
{'loss': 1.2629, 'grad_norm': 0.3109349012374878, 'learning_rate': 2.5151301948622237e-05, 'epoch': 1.24, 'num_input_tokens_seen': 1852576}
|
||||
{'loss': 1.1546, 'grad_norm': 0.6654074788093567, 'learning_rate': 2.469831179394182e-05, 'epoch': 1.25, 'num_input_tokens_seen': 1858368}
|
||||
{'loss': 1.1971, 'grad_norm': 0.36677348613739014, 'learning_rate': 2.4248096254497288e-05, 'epoch': 1.25, 'num_input_tokens_seen': 1867648}
|
||||
{'loss': 1.213, 'grad_norm': 0.6804356575012207, 'learning_rate': 2.3800704701496053e-05, 'epoch': 1.26, 'num_input_tokens_seen': 1875280}
|
||||
{'loss': 1.2494, 'grad_norm': 0.44879263639450073, 'learning_rate': 2.33561861964635e-05, 'epoch': 1.26, 'num_input_tokens_seen': 1884192}
|
||||
{'loss': 1.1249, 'grad_norm': 0.5840603709220886, 'learning_rate': 2.2914589485863014e-05, 'epoch': 1.27, 'num_input_tokens_seen': 1892208}
|
||||
{'loss': 1.1762, 'grad_norm': 0.5910426378250122, 'learning_rate': 2.247596299575022e-05, 'epoch': 1.27, 'num_input_tokens_seen': 1900432}
|
||||
{'loss': 1.3507, 'grad_norm': 0.4976474940776825, 'learning_rate': 2.2040354826462668e-05, 'epoch': 1.28, 'num_input_tokens_seen': 1907104}
|
||||
{'loss': 1.1874, 'grad_norm': 0.6093457937240601, 'learning_rate': 2.160781274734495e-05, 'epoch': 1.29, 'num_input_tokens_seen': 1917024}
|
||||
{'loss': 1.3059, 'grad_norm': 0.4481809437274933, 'learning_rate': 2.117838419151034e-05, 'epoch': 1.29, 'num_input_tokens_seen': 1924096}
|
||||
{'loss': 1.2108, 'grad_norm': 0.5955674052238464, 'learning_rate': 2.0752116250639225e-05, 'epoch': 1.3, 'num_input_tokens_seen': 1931488}
|
||||
{'loss': 1.3217, 'grad_norm': 0.6927450895309448, 'learning_rate': 2.0329055669814934e-05, 'epoch': 1.3, 'num_input_tokens_seen': 1939856}
|
||||
{'loss': 1.0395, 'grad_norm': 1.1952440738677979, 'learning_rate': 1.9909248842397584e-05, 'epoch': 1.31, 'num_input_tokens_seen': 1946528}
|
||||
{'loss': 1.1858, 'grad_norm': 0.7097569704055786, 'learning_rate': 1.9492741804936622e-05, 'epoch': 1.31, 'num_input_tokens_seen': 1953728}
|
||||
{'loss': 1.2912, 'grad_norm': 0.48777928948402405, 'learning_rate': 1.9079580232122303e-05, 'epoch': 1.32, 'num_input_tokens_seen': 1961488}
|
||||
{'loss': 1.1579, 'grad_norm': 0.5239306688308716, 'learning_rate': 1.866980943177699e-05, 'epoch': 1.32, 'num_input_tokens_seen': 1968480}
|
||||
{'loss': 1.1789, 'grad_norm': 0.4721810221672058, 'learning_rate': 1.8263474339886628e-05, 'epoch': 1.33, 'num_input_tokens_seen': 1977680}
|
||||
{'loss': 1.2551, 'grad_norm': 0.767265260219574, 'learning_rate': 1.7860619515673033e-05, 'epoch': 1.33, 'num_input_tokens_seen': 1985072}
|
||||
{'loss': 1.2931, 'grad_norm': 0.7955953478813171, 'learning_rate': 1.746128913670746e-05, 'epoch': 1.34, 'num_input_tokens_seen': 1991744}
|
||||
{'loss': 1.2748, 'grad_norm': 0.5249313712120056, 'learning_rate': 1.7065526994065973e-05, 'epoch': 1.34, 'num_input_tokens_seen': 1999136}
|
||||
{'loss': 1.2862, 'grad_norm': 0.5865403413772583, 'learning_rate': 1.667337648752738e-05, 'epoch': 1.35, 'num_input_tokens_seen': 2007056}
|
||||
{'loss': 1.3582, 'grad_norm': 0.5127578973770142, 'learning_rate': 1.6284880620813848e-05, 'epoch': 1.35, 'num_input_tokens_seen': 2015360}
|
||||
{'loss': 1.1798, 'grad_norm': 0.5742343664169312, 'learning_rate': 1.5900081996875083e-05, 'epoch': 1.36, 'num_input_tokens_seen': 2023600}
|
||||
{'loss': 1.1198, 'grad_norm': 0.3727271556854248, 'learning_rate': 1.551902281321651e-05, 'epoch': 1.37, 'num_input_tokens_seen': 2031200}
|
||||
{'loss': 1.2644, 'grad_norm': 0.8866073489189148, 'learning_rate': 1.5141744857271778e-05, 'epoch': 1.37, 'num_input_tokens_seen': 2038272}
|
||||
{'loss': 1.383, 'grad_norm': 0.6852172017097473, 'learning_rate': 1.4768289501820265e-05, 'epoch': 1.38, 'num_input_tokens_seen': 2045584}
|
||||
{'loss': 1.2255, 'grad_norm': 0.5909332036972046, 'learning_rate': 1.439869770045018e-05, 'epoch': 1.38, 'num_input_tokens_seen': 2052272}
|
||||
{'loss': 1.3218, 'grad_norm': 0.3967475891113281, 'learning_rate': 1.4033009983067452e-05, 'epoch': 1.39, 'num_input_tokens_seen': 2059296}
|
||||
{'loss': 1.2757, 'grad_norm': 0.9393104910850525, 'learning_rate': 1.367126645145121e-05, 'epoch': 1.39, 'num_input_tokens_seen': 2066624}
|
||||
{'loss': 1.3221, 'grad_norm': 0.5009010434150696, 'learning_rate': 1.3313506774856177e-05, 'epoch': 1.4, 'num_input_tokens_seen': 2075472}
|
||||
{'loss': 1.3386, 'grad_norm': 0.6973915100097656, 'learning_rate': 1.29597701856625e-05, 'epoch': 1.4, 'num_input_tokens_seen': 2084000}
|
||||
{'loss': 1.2699, 'grad_norm': 0.5194016695022583, 'learning_rate': 1.2610095475073414e-05, 'epoch': 1.41, 'num_input_tokens_seen': 2092592}
|
||||
{'loss': 1.0779, 'grad_norm': 0.5616266131401062, 'learning_rate': 1.22645209888614e-05, 'epoch': 1.41, 'num_input_tokens_seen': 2099456}
|
||||
{'loss': 1.2973, 'grad_norm': 0.6077366471290588, 'learning_rate': 1.1923084623163172e-05, 'epoch': 1.42, 'num_input_tokens_seen': 2107648}
|
||||
{'loss': 1.3005, 'grad_norm': 0.5598793625831604, 'learning_rate': 1.1585823820323843e-05, 'epoch': 1.42, 'num_input_tokens_seen': 2114960}
|
||||
{'loss': 1.1668, 'grad_norm': 0.7757420539855957, 'learning_rate': 1.1252775564791024e-05, 'epoch': 1.43, 'num_input_tokens_seen': 2123216}
|
||||
{'loss': 1.2009, 'grad_norm': 0.5686850547790527, 'learning_rate': 1.0923976379059058e-05, 'epoch': 1.43, 'num_input_tokens_seen': 2131392}
|
||||
{'loss': 1.103, 'grad_norm': 0.7514450550079346, 'learning_rate': 1.0599462319663905e-05, 'epoch': 1.44, 'num_input_tokens_seen': 2139360}
|
||||
{'loss': 1.2085, 'grad_norm': 0.6718158721923828, 'learning_rate': 1.0279268973229089e-05, 'epoch': 1.45, 'num_input_tokens_seen': 2148976}
|
||||
{'loss': 1.2273, 'grad_norm': 0.6261684894561768, 'learning_rate': 9.963431452563332e-06, 'epoch': 1.45, 'num_input_tokens_seen': 2157024}
|
||||
{'loss': 1.0912, 'grad_norm': 0.6347564458847046, 'learning_rate': 9.651984392809914e-06, 'epoch': 1.46, 'num_input_tokens_seen': 2165456}
|
||||
{'loss': 1.2239, 'grad_norm': 0.5831385850906372, 'learning_rate': 9.344961947648623e-06, 'epoch': 1.46, 'num_input_tokens_seen': 2173280}
|
||||
{'loss': 1.2145, 'grad_norm': 0.5968154072761536, 'learning_rate': 9.042397785550405e-06, 'epoch': 1.47, 'num_input_tokens_seen': 2180496}
|
||||
{'loss': 1.3317, 'grad_norm': 0.685257077217102, 'learning_rate': 8.744325086085248e-06, 'epoch': 1.47, 'num_input_tokens_seen': 2187648}
|
||||
{'loss': 1.244, 'grad_norm': 0.7777274250984192, 'learning_rate': 8.450776536283594e-06, 'epoch': 1.48, 'num_input_tokens_seen': 2194960}
|
||||
{'loss': 1.2788, 'grad_norm': 0.6194698810577393, 'learning_rate': 8.16178432705192e-06, 'epoch': 1.48, 'num_input_tokens_seen': 2203072}
|
||||
{'loss': 1.1167, 'grad_norm': 0.41038453578948975, 'learning_rate': 7.877380149642626e-06, 'epoch': 1.49, 'num_input_tokens_seen': 2211808}
|
||||
{'loss': 1.4073, 'grad_norm': 0.8537396192550659, 'learning_rate': 7.597595192178702e-06, 'epoch': 1.49, 'num_input_tokens_seen': 2220656}
|
||||
{'loss': 1.3246, 'grad_norm': 0.5099229216575623, 'learning_rate': 7.322460136233622e-06, 'epoch': 1.5, 'num_input_tokens_seen': 2228368}
|
||||
{'loss': 1.2071, 'grad_norm': 0.47402122616767883, 'learning_rate': 7.052005153466779e-06, 'epoch': 1.5, 'num_input_tokens_seen': 2236560}
|
||||
{'loss': 1.2258, 'grad_norm': 0.7217351794242859, 'learning_rate': 6.786259902314768e-06, 'epoch': 1.51, 'num_input_tokens_seen': 2244352}
|
||||
{'loss': 1.3457, 'grad_norm': 0.4602849781513214, 'learning_rate': 6.52525352473905e-06, 'epoch': 1.51, 'num_input_tokens_seen': 2252976}
|
||||
{'loss': 1.2233, 'grad_norm': 0.536681592464447, 'learning_rate': 6.269014643030213e-06, 'epoch': 1.52, 'num_input_tokens_seen': 2260800}
|
||||
{'loss': 1.2691, 'grad_norm': 0.37533435225486755, 'learning_rate': 6.017571356669183e-06, 'epoch': 1.53, 'num_input_tokens_seen': 2269632}
|
||||
{'loss': 1.1786, 'grad_norm': 0.7929604053497314, 'learning_rate': 5.770951239245803e-06, 'epoch': 1.53, 'num_input_tokens_seen': 2276864}
|
||||
{'loss': 1.1514, 'grad_norm': 0.7656516432762146, 'learning_rate': 5.529181335435124e-06, 'epoch': 1.54, 'num_input_tokens_seen': 2283808}
|
||||
{'loss': 1.245, 'grad_norm': 0.5882564187049866, 'learning_rate': 5.292288158031594e-06, 'epoch': 1.54, 'num_input_tokens_seen': 2291072}
|
||||
{'loss': 1.1263, 'grad_norm': 0.5979832410812378, 'learning_rate': 5.060297685041659e-06, 'epoch': 1.55, 'num_input_tokens_seen': 2298448}
|
||||
{'loss': 1.2041, 'grad_norm': 0.9165539741516113, 'learning_rate': 4.833235356834959e-06, 'epoch': 1.55, 'num_input_tokens_seen': 2307248}
|
||||
{'loss': 1.1853, 'grad_norm': 0.49357733130455017, 'learning_rate': 4.611126073354571e-06, 'epoch': 1.56, 'num_input_tokens_seen': 2316192}
|
||||
{'loss': 1.2726, 'grad_norm': 0.8443873524665833, 'learning_rate': 4.3939941913863525e-06, 'epoch': 1.56, 'num_input_tokens_seen': 2322704}
|
||||
{'loss': 1.2391, 'grad_norm': 0.5544173121452332, 'learning_rate': 4.181863521888019e-06, 'epoch': 1.57, 'num_input_tokens_seen': 2329600}
|
||||
{'loss': 1.2163, 'grad_norm': 0.6180024743080139, 'learning_rate': 3.974757327377981e-06, 'epoch': 1.57, 'num_input_tokens_seen': 2337120}
|
||||
{'loss': 1.2243, 'grad_norm': 0.8412026166915894, 'learning_rate': 3.772698319384349e-06, 'epoch': 1.58, 'num_input_tokens_seen': 2344896}
|
||||
{'loss': 1.3121, 'grad_norm': 0.5388696789741516, 'learning_rate': 3.575708655954324e-06, 'epoch': 1.58, 'num_input_tokens_seen': 2354384}
|
||||
{'loss': 1.1817, 'grad_norm': 0.6869313716888428, 'learning_rate': 3.3838099392243916e-06, 'epoch': 1.59, 'num_input_tokens_seen': 2363984}
|
||||
{'loss': 1.2091, 'grad_norm': 0.6533048748970032, 'learning_rate': 3.197023213051337e-06, 'epoch': 1.59, 'num_input_tokens_seen': 2373008}
|
||||
{'loss': 1.2872, 'grad_norm': 0.5742602944374084, 'learning_rate': 3.0153689607045845e-06, 'epoch': 1.6, 'num_input_tokens_seen': 2379984}
|
||||
{'loss': 1.2326, 'grad_norm': 0.6805256605148315, 'learning_rate': 2.8388671026199522e-06, 'epoch': 1.61, 'num_input_tokens_seen': 2387472}
|
||||
{'loss': 1.2061, 'grad_norm': 0.5272711515426636, 'learning_rate': 2.667536994215186e-06, 'epoch': 1.61, 'num_input_tokens_seen': 2395504}
|
||||
{'loss': 1.3237, 'grad_norm': 0.6741738319396973, 'learning_rate': 2.501397423767382e-06, 'epoch': 1.62, 'num_input_tokens_seen': 2402752}
|
||||
{'loss': 1.2587, 'grad_norm': 0.5710972547531128, 'learning_rate': 2.340466610352654e-06, 'epoch': 1.62, 'num_input_tokens_seen': 2411056}
|
||||
{'loss': 1.2855, 'grad_norm': 0.8992254137992859, 'learning_rate': 2.1847622018482283e-06, 'epoch': 1.63, 'num_input_tokens_seen': 2419232}
|
||||
{'loss': 1.3, 'grad_norm': 1.1530307531356812, 'learning_rate': 2.0343012729971243e-06, 'epoch': 1.63, 'num_input_tokens_seen': 2426768}
|
||||
{'loss': 1.257, 'grad_norm': 0.9547299742698669, 'learning_rate': 1.8891003235357308e-06, 'epoch': 1.64, 'num_input_tokens_seen': 2434160}
|
||||
{'loss': 1.2312, 'grad_norm': 0.5935231447219849, 'learning_rate': 1.7491752763844293e-06, 'epoch': 1.64, 'num_input_tokens_seen': 2442512}
|
||||
{'loss': 1.1204, 'grad_norm': 0.601706326007843, 'learning_rate': 1.6145414759014431e-06, 'epoch': 1.65, 'num_input_tokens_seen': 2450480}
|
||||
{'loss': 1.2204, 'grad_norm': 0.6756156086921692, 'learning_rate': 1.4852136862001764e-06, 'epoch': 1.65, 'num_input_tokens_seen': 2459168}
|
||||
{'loss': 1.2004, 'grad_norm': 0.6837950944900513, 'learning_rate': 1.3612060895301759e-06, 'epoch': 1.66, 'num_input_tokens_seen': 2466032}
|
||||
{'loss': 1.3066, 'grad_norm': 0.5756305456161499, 'learning_rate': 1.2425322847218368e-06, 'epoch': 1.66, 'num_input_tokens_seen': 2474352}
|
||||
{'loss': 1.1302, 'grad_norm': 0.7406111359596252, 'learning_rate': 1.1292052856952062e-06, 'epoch': 1.67, 'num_input_tokens_seen': 2481792}
|
||||
{'loss': 1.0001, 'grad_norm': 0.5320125818252563, 'learning_rate': 1.0212375200327973e-06, 'epoch': 1.67, 'num_input_tokens_seen': 2489424}
|
||||
{'loss': 1.2162, 'grad_norm': 0.5728276968002319, 'learning_rate': 9.186408276168013e-07, 'epoch': 1.68, 'num_input_tokens_seen': 2497888}
|
||||
{'loss': 1.3685, 'grad_norm': 0.7525292634963989, 'learning_rate': 8.214264593307098e-07, 'epoch': 1.69, 'num_input_tokens_seen': 2504592}
|
||||
{'loss': 1.3391, 'grad_norm': 1.1481095552444458, 'learning_rate': 7.296050758254957e-07, 'epoch': 1.69, 'num_input_tokens_seen': 2512480}
|
||||
{'loss': 1.2548, 'grad_norm': 0.5299532413482666, 'learning_rate': 6.431867463506048e-07, 'epoch': 1.7, 'num_input_tokens_seen': 2519504}
|
||||
{'loss': 1.3917, 'grad_norm': 0.5739784240722656, 'learning_rate': 5.621809476497098e-07, 'epoch': 1.7, 'num_input_tokens_seen': 2527360}
|
||||
{'loss': 1.1987, 'grad_norm': 0.831795334815979, 'learning_rate': 4.865965629214819e-07, 'epoch': 1.71, 'num_input_tokens_seen': 2533856}
|
||||
{'loss': 1.4638, 'grad_norm': 0.450013667345047, 'learning_rate': 4.1644188084548063e-07, 'epoch': 1.71, 'num_input_tokens_seen': 2541616}
|
||||
{'loss': 1.1836, 'grad_norm': 0.5501786470413208, 'learning_rate': 3.517245946731529e-07, 'epoch': 1.72, 'num_input_tokens_seen': 2549136}
|
||||
{'loss': 1.1814, 'grad_norm': 0.4375413656234741, 'learning_rate': 2.924518013842303e-07, 'epoch': 1.72, 'num_input_tokens_seen': 2557584}
|
||||
{'loss': 1.2356, 'grad_norm': 0.648204505443573, 'learning_rate': 2.386300009084408e-07, 'epoch': 1.73, 'num_input_tokens_seen': 2566880}
|
||||
{'loss': 1.1505, 'grad_norm': 0.6714820265769958, 'learning_rate': 1.9026509541272275e-07, 'epoch': 1.73, 'num_input_tokens_seen': 2575552}
|
||||
{'loss': 1.1444, 'grad_norm': 0.5864465236663818, 'learning_rate': 1.4736238865398765e-07, 'epoch': 1.74, 'num_input_tokens_seen': 2583600}
|
||||
{'loss': 1.3877, 'grad_norm': 0.4161326587200165, 'learning_rate': 1.0992658539750178e-07, 'epoch': 1.74, 'num_input_tokens_seen': 2591536}
|
||||
{'loss': 1.3222, 'grad_norm': 0.4580128788948059, 'learning_rate': 7.796179090094891e-08, 'epoch': 1.75, 'num_input_tokens_seen': 2599920}
|
||||
{'loss': 1.1343, 'grad_norm': 0.5980444550514221, 'learning_rate': 5.1471510464268236e-08, 'epoch': 1.75, 'num_input_tokens_seen': 2607344}
|
||||
{'loss': 1.1924, 'grad_norm': 0.6569440960884094, 'learning_rate': 3.04586490452119e-08, 'epoch': 1.76, 'num_input_tokens_seen': 2615792}
|
||||
{'loss': 1.2759, 'grad_norm': 0.7224981784820557, 'learning_rate': 1.4925510940844156e-08, 'epoch': 1.77, 'num_input_tokens_seen': 2623936}
|
||||
{'loss': 1.2298, 'grad_norm': 0.7565916180610657, 'learning_rate': 4.873799534788059e-09, 'epoch': 1.77, 'num_input_tokens_seen': 2631440}
|
||||
{'loss': 1.1737, 'grad_norm': 0.7147485017776489, 'learning_rate': 3.0461711048035415e-10, 'epoch': 1.78, 'num_input_tokens_seen': 2638432}
|
||||
{'eval_loss': 1.2747279405593872, 'eval_runtime': 21.18, 'eval_samples_per_second': 47.214, 'eval_steps_per_second': 23.607, 'epoch': 1.78, 'num_input_tokens_seen': 2640912}
|
||||
{'train_runtime': 1251.8768, 'train_samples_per_second': 12.781, 'train_steps_per_second': 0.799, 'train_loss': 1.2933769166469573, 'epoch': 1.78, 'num_input_tokens_seen': 2640912}
|
||||
***** train metrics *****
|
||||
epoch = 1.7778
|
||||
num_input_tokens_seen = 2640912
|
||||
total_flos = 105025636GF
|
||||
train_loss = 1.2934
|
||||
train_runtime = 0:20:51.87
|
||||
train_samples_per_second = 12.781
|
||||
train_steps_per_second = 0.799
|
||||
Figure saved at: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single/training_loss.png
|
||||
Figure saved at: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single/training_eval_loss.png
|
||||
08/21/2024 06:20:51 - WARNING - llamafactory.extras.ploting - No metric eval_accuracy to plot.
|
||||
***** eval metrics *****
|
||||
epoch = 1.7778
|
||||
eval_loss = 1.2747
|
||||
eval_runtime = 0:00:20.73
|
||||
eval_samples_per_second = 48.237
|
||||
eval_steps_per_second = 24.119
|
||||
num_input_tokens_seen = 2640912
|
|
@ -0,0 +1,66 @@
|
|||
---
|
||||
base_model: ../../llm/qwen
|
||||
library_name: peft
|
||||
license: other
|
||||
tags:
|
||||
- llama-factory
|
||||
- lora
|
||||
- generated_from_trainer
|
||||
model-index:
|
||||
- name: Qwen_lora_sft_1_single
|
||||
results: []
|
||||
---
|
||||
|
||||
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
||||
should probably proofread and complete it, then remove this comment. -->
|
||||
|
||||
# Qwen_lora_sft_1_single
|
||||
|
||||
This model is a fine-tuned version of the base model located at `../../llm/qwen` (a local path, not a Hugging Face Hub model ID) on the belle_1m dataset.
|
||||
It achieves the following results on the evaluation set:
|
||||
- Loss: 1.2747
|
||||
- Num Input Tokens Seen: 2640912
|
||||
|
||||
## Model description
|
||||
|
||||
More information needed
|
||||
|
||||
## Intended uses & limitations
|
||||
|
||||
More information needed
|
||||
|
||||
## Training and evaluation data
|
||||
|
||||
More information needed
|
||||
|
||||
## Training procedure
|
||||
|
||||
### Training hyperparameters
|
||||
|
||||
The following hyperparameters were used during training:
|
||||
- learning_rate: 0.0001
|
||||
- train_batch_size: 2
|
||||
- eval_batch_size: 2
|
||||
- seed: 42
|
||||
- gradient_accumulation_steps: 8
|
||||
- total_train_batch_size: 16
|
||||
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
||||
- lr_scheduler_type: cosine
|
||||
- lr_scheduler_warmup_ratio: 0.1
|
||||
- training_steps: 1000
|
||||
|
||||
### Training results
|
||||
|
||||
| Training Loss | Epoch | Step | Validation Loss | Input Tokens Seen |
|
||||
|:-------------:|:------:|:----:|:---------------:|:-----------------:|
|
||||
| 1.2374 | 0.8889 | 500 | 1.2757 | 1321104 |
|
||||
| 1.1737 | 1.7778 | 1000 | 1.2747 | 2640912 |
|
||||
|
||||
|
||||
### Framework versions
|
||||
|
||||
- PEFT 0.12.0
|
||||
- Transformers 4.43.4
|
||||
- Pytorch 2.4.0+cu121
|
||||
- Datasets 2.20.0
|
||||
- Tokenizers 0.19.1
|
|
@ -0,0 +1,31 @@
|
|||
{
|
||||
"alpha_pattern": {},
|
||||
"auto_mapping": null,
|
||||
"base_model_name_or_path": "../../llm/qwen",
|
||||
"bias": "none",
|
||||
"fan_in_fan_out": false,
|
||||
"inference_mode": true,
|
||||
"init_lora_weights": true,
|
||||
"layer_replication": null,
|
||||
"layers_pattern": null,
|
||||
"layers_to_transform": null,
|
||||
"loftq_config": {},
|
||||
"lora_alpha": 16,
|
||||
"lora_dropout": 0.0,
|
||||
"megatron_config": null,
|
||||
"megatron_core": "megatron.core",
|
||||
"modules_to_save": null,
|
||||
"peft_type": "LORA",
|
||||
"r": 8,
|
||||
"rank_pattern": {},
|
||||
"revision": null,
|
||||
"target_modules": [
|
||||
"c_attn",
|
||||
"c_proj",
|
||||
"w1",
|
||||
"w2"
|
||||
],
|
||||
"task_type": "CAUSAL_LM",
|
||||
"use_dora": false,
|
||||
"use_rslora": false
|
||||
}
|
Binary file not shown.
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
"epoch": 1.7777777777777777,
|
||||
"eval_loss": 1.2747279405593872,
|
||||
"eval_runtime": 20.7308,
|
||||
"eval_samples_per_second": 48.237,
|
||||
"eval_steps_per_second": 24.119,
|
||||
"num_input_tokens_seen": 2640912,
|
||||
"total_flos": 1.1277041809371955e+17,
|
||||
"train_loss": 1.2933769166469573,
|
||||
"train_runtime": 1251.8768,
|
||||
"train_samples_per_second": 12.781,
|
||||
"train_steps_per_second": 0.799
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"epoch": 1.7777777777777777,
|
||||
"eval_loss": 1.2747279405593872,
|
||||
"eval_runtime": 20.7308,
|
||||
"eval_samples_per_second": 48.237,
|
||||
"eval_steps_per_second": 24.119,
|
||||
"num_input_tokens_seen": 2640912
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
"eos_token": {
|
||||
"content": "<|im_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
},
|
||||
"pad_token": "<|im_end|>"
|
||||
}
|
|
@ -0,0 +1,276 @@
|
|||
# Copyright (c) Alibaba Cloud.
|
||||
#
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
"""Tokenization classes for QWen."""
|
||||
|
||||
import base64
|
||||
import logging
|
||||
import os
|
||||
import unicodedata
|
||||
from typing import Collection, Dict, List, Set, Tuple, Union
|
||||
|
||||
import tiktoken
|
||||
from transformers import PreTrainedTokenizer, AddedToken
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# File name the tokenizer looks for inside a model directory.
VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}

# tiktoken pre-tokenization split pattern (uses \p{...} classes, which
# tiktoken's regex engine supports): contractions, letter runs, digits,
# punctuation runs, and whitespace handling.
PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
ENDOFTEXT = "<|endoftext|>"
IMSTART = "<|im_start|>"
IMEND = "<|im_end|>"
# as the default behavior is changed to allow special tokens in
# regular texts, the surface forms of special tokens need to be
# as different as possible to minimize the impact
EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
# changed to use actual index to avoid misconfiguration with vocabulary expansion
SPECIAL_START_ID = 151643
# Pairs of (token_id, surface_form), ids assigned consecutively from
# SPECIAL_START_ID: <|endoftext|> first, then <|im_start|>, <|im_end|>,
# then the 205 <|extra_i|> placeholders.
SPECIAL_TOKENS = tuple(
    enumerate(
        (
            (
                ENDOFTEXT,
                IMSTART,
                IMEND,
            )
            + EXTRAS
        ),
        start=SPECIAL_START_ID,
    )
)
# Surface forms only, for fast membership tests in _add_tokens.
SPECIAL_TOKENS_SET = set(t for i, t in SPECIAL_TOKENS)
|
||||
|
||||
|
||||
def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
|
||||
with open(tiktoken_bpe_file, "rb") as f:
|
||||
contents = f.read()
|
||||
return {
|
||||
base64.b64decode(token): int(rank)
|
||||
for token, rank in (line.split() for line in contents.splitlines() if line)
|
||||
}
|
||||
|
||||
|
||||
class QWenTokenizer(PreTrainedTokenizer):
|
||||
"""QWen tokenizer."""
|
||||
|
||||
vocab_files_names = VOCAB_FILES_NAMES
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vocab_file,
|
||||
errors="replace",
|
||||
extra_vocab_file=None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(**kwargs)
|
||||
|
||||
# how to handle errors in decoding UTF-8 byte sequences
|
||||
# use ignore if you are in streaming inference
|
||||
self.errors = errors
|
||||
|
||||
self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: Dict[bytes, int]
|
||||
self.special_tokens = {
|
||||
token: index
|
||||
for index, token in SPECIAL_TOKENS
|
||||
}
|
||||
|
||||
# try load extra vocab from file
|
||||
if extra_vocab_file is not None:
|
||||
used_ids = set(self.mergeable_ranks.values()) | set(self.special_tokens.values())
|
||||
extra_mergeable_ranks = _load_tiktoken_bpe(extra_vocab_file)
|
||||
for token, index in extra_mergeable_ranks.items():
|
||||
if token in self.mergeable_ranks:
|
||||
logger.info(f"extra token {token} exists, skipping")
|
||||
continue
|
||||
if index in used_ids:
|
||||
logger.info(f'the index {index} for extra token {token} exists, skipping')
|
||||
continue
|
||||
self.mergeable_ranks[token] = index
|
||||
# the index may be sparse after this, but don't worry tiktoken.Encoding will handle this
|
||||
|
||||
enc = tiktoken.Encoding(
|
||||
"Qwen",
|
||||
pat_str=PAT_STR,
|
||||
mergeable_ranks=self.mergeable_ranks,
|
||||
special_tokens=self.special_tokens,
|
||||
)
|
||||
assert (
|
||||
len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
|
||||
), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
|
||||
|
||||
self.decoder = {
|
||||
v: k for k, v in self.mergeable_ranks.items()
|
||||
} # type: dict[int, bytes|str]
|
||||
self.decoder.update({v: k for k, v in self.special_tokens.items()})
|
||||
|
||||
self.tokenizer = enc # type: tiktoken.Encoding
|
||||
|
||||
self.eod_id = self.tokenizer.eot_token
|
||||
self.im_start_id = self.special_tokens[IMSTART]
|
||||
self.im_end_id = self.special_tokens[IMEND]
|
||||
|
||||
def __getstate__(self):
|
||||
# for pickle lovers
|
||||
state = self.__dict__.copy()
|
||||
del state["tokenizer"]
|
||||
return state
|
||||
|
||||
def __setstate__(self, state):
|
||||
# tokenizer is not python native; don't pass it; rebuild it
|
||||
self.__dict__.update(state)
|
||||
enc = tiktoken.Encoding(
|
||||
"Qwen",
|
||||
pat_str=PAT_STR,
|
||||
mergeable_ranks=self.mergeable_ranks,
|
||||
special_tokens=self.special_tokens,
|
||||
)
|
||||
self.tokenizer = enc
|
||||
|
||||
def __len__(self) -> int:
|
||||
return self.tokenizer.n_vocab
|
||||
|
||||
def get_vocab(self) -> Dict[bytes, int]:
|
||||
return self.mergeable_ranks
|
||||
|
||||
def convert_tokens_to_ids(
|
||||
self, tokens: Union[bytes, str, List[Union[bytes, str]]]
|
||||
) -> List[int]:
|
||||
ids = []
|
||||
if isinstance(tokens, (str, bytes)):
|
||||
if tokens in self.special_tokens:
|
||||
return self.special_tokens[tokens]
|
||||
else:
|
||||
return self.mergeable_ranks.get(tokens)
|
||||
for token in tokens:
|
||||
if token in self.special_tokens:
|
||||
ids.append(self.special_tokens[token])
|
||||
else:
|
||||
ids.append(self.mergeable_ranks.get(token))
|
||||
return ids
|
||||
|
||||
def _add_tokens(
|
||||
self,
|
||||
new_tokens: Union[List[str], List[AddedToken]],
|
||||
special_tokens: bool = False,
|
||||
) -> int:
|
||||
if not special_tokens and new_tokens:
|
||||
raise ValueError("Adding regular tokens is not supported")
|
||||
for token in new_tokens:
|
||||
surface_form = token.content if isinstance(token, AddedToken) else token
|
||||
if surface_form not in SPECIAL_TOKENS_SET:
|
||||
raise ValueError("Adding unknown special tokens is not supported")
|
||||
return 0
|
||||
|
||||
def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
|
||||
"""
|
||||
Save only the vocabulary of the tokenizer (vocabulary).
|
||||
|
||||
Returns:
|
||||
`Tuple(str)`: Paths to the files saved.
|
||||
"""
|
||||
file_path = os.path.join(save_directory, "qwen.tiktoken")
|
||||
with open(file_path, "w", encoding="utf8") as w:
|
||||
for k, v in self.mergeable_ranks.items():
|
||||
line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
|
||||
w.write(line)
|
||||
return (file_path,)
|
||||
|
||||
def tokenize(
|
||||
self,
|
||||
text: str,
|
||||
allowed_special: Union[Set, str] = "all",
|
||||
disallowed_special: Union[Collection, str] = (),
|
||||
**kwargs,
|
||||
) -> List[Union[bytes, str]]:
|
||||
"""
|
||||
Converts a string in a sequence of tokens.
|
||||
|
||||
Args:
|
||||
text (`str`):
|
||||
The sequence to be encoded.
|
||||
allowed_special (`Literal["all"]` or `set`):
|
||||
The surface forms of the tokens to be encoded as special tokens in regular texts.
|
||||
Default to "all".
|
||||
disallowed_special (`Literal["all"]` or `Collection`):
|
||||
The surface forms of the tokens that should not be in regular texts and trigger errors.
|
||||
Default to an empty tuple.
|
||||
|
||||
kwargs (additional keyword arguments, *optional*):
|
||||
Will be passed to the underlying model specific encode method.
|
||||
|
||||
Returns:
|
||||
`List[bytes|str]`: The list of tokens.
|
||||
"""
|
||||
tokens = []
|
||||
text = unicodedata.normalize("NFC", text)
|
||||
|
||||
# this implementation takes a detour: text -> token id -> token surface forms
|
||||
for t in self.tokenizer.encode(
|
||||
text, allowed_special=allowed_special, disallowed_special=disallowed_special
|
||||
):
|
||||
tokens.append(self.decoder[t])
|
||||
return tokens
|
||||
|
||||
def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
|
||||
"""
|
||||
Converts a sequence of tokens in a single string.
|
||||
"""
|
||||
text = ""
|
||||
temp = b""
|
||||
for t in tokens:
|
||||
if isinstance(t, str):
|
||||
if temp:
|
||||
text += temp.decode("utf-8", errors=self.errors)
|
||||
temp = b""
|
||||
text += t
|
||||
elif isinstance(t, bytes):
|
||||
temp += t
|
||||
else:
|
||||
raise TypeError("token should only be of type types or str")
|
||||
if temp:
|
||||
text += temp.decode("utf-8", errors=self.errors)
|
||||
return text
|
||||
|
||||
@property
def vocab_size(self):
    """Total number of entries in the underlying tokenizer's vocabulary."""
    size = self.tokenizer.n_vocab
    return size
|
||||
|
||||
def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
|
||||
"""Converts an id to a token, special tokens included"""
|
||||
if index in self.decoder:
|
||||
return self.decoder[index]
|
||||
raise ValueError("unknown ids")
|
||||
|
||||
def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
|
||||
"""Converts a token to an id using the vocab, special tokens included"""
|
||||
if token in self.special_tokens:
|
||||
return self.special_tokens[token]
|
||||
if token in self.mergeable_ranks:
|
||||
return self.mergeable_ranks[token]
|
||||
raise ValueError("unknown token")
|
||||
|
||||
def _tokenize(self, text: str, **kwargs):
    """
    Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
    vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).

    Do NOT take care of added tokens.
    """
    # Deliberately unimplemented: this tokenizer encodes text directly via the
    # public `tokenize()` method rather than through this internal hook.
    raise NotImplementedError
|
||||
|
||||
def _decode(
|
||||
self,
|
||||
token_ids: Union[int, List[int]],
|
||||
skip_special_tokens: bool = False,
|
||||
errors: str = None,
|
||||
**kwargs,
|
||||
) -> str:
|
||||
if isinstance(token_ids, int):
|
||||
token_ids = [token_ids]
|
||||
if skip_special_tokens:
|
||||
token_ids = [i for i in token_ids if i < self.eod_id]
|
||||
return self.tokenizer.decode(token_ids, errors=errors or self.errors)
|
|
@ -0,0 +1,17 @@
|
|||
{
|
||||
"added_tokens_decoder": {},
|
||||
"auto_map": {
|
||||
"AutoTokenizer": [
|
||||
"tokenization_qwen.QWenTokenizer",
|
||||
null
|
||||
]
|
||||
},
|
||||
"chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
|
||||
"clean_up_tokenization_spaces": true,
|
||||
"eos_token": "<|im_end|>",
|
||||
"model_max_length": 32768,
|
||||
"pad_token": "<|im_end|>",
|
||||
"padding_side": "right",
|
||||
"split_special_tokens": false,
|
||||
"tokenizer_class": "QWenTokenizer"
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"epoch": 1.7777777777777777,
|
||||
"num_input_tokens_seen": 2640912,
|
||||
"total_flos": 1.1277041809371955e+17,
|
||||
"train_loss": 1.2933769166469573,
|
||||
"train_runtime": 1251.8768,
|
||||
"train_samples_per_second": 12.781,
|
||||
"train_steps_per_second": 0.799
|
||||
}
|
|
@ -0,0 +1,336 @@
|
|||
{"current_steps": 3, "total_steps": 1000, "loss": 1.5198, "learning_rate": 3e-06, "epoch": 0.005333333333333333, "percentage": 0.3, "cur_time": "2024-08-21 06:00:02", "elapsed_time": "0:00:04", "remaining_time": "0:22:22", "throughput": "2428.65", "total_tokens": 9808}
|
||||
{"current_steps": 6, "total_steps": 1000, "loss": 1.5584, "learning_rate": 6e-06, "epoch": 0.010666666666666666, "percentage": 0.6, "cur_time": "2024-08-21 06:00:06", "elapsed_time": "0:00:07", "remaining_time": "0:20:51", "throughput": "2555.41", "total_tokens": 19312}
|
||||
{"current_steps": 9, "total_steps": 1000, "loss": 1.5898, "learning_rate": 9e-06, "epoch": 0.016, "percentage": 0.9, "cur_time": "2024-08-21 06:00:10", "elapsed_time": "0:00:11", "remaining_time": "0:20:33", "throughput": "2608.64", "total_tokens": 29232}
|
||||
{"current_steps": 12, "total_steps": 1000, "loss": 1.8087, "learning_rate": 1.2e-05, "epoch": 0.021333333333333333, "percentage": 1.2, "cur_time": "2024-08-21 06:00:13", "elapsed_time": "0:00:14", "remaining_time": "0:20:10", "throughput": "2582.83", "total_tokens": 37984}
|
||||
{"current_steps": 15, "total_steps": 1000, "loss": 1.715, "learning_rate": 1.5e-05, "epoch": 0.02666666666666667, "percentage": 1.5, "cur_time": "2024-08-21 06:00:16", "elapsed_time": "0:00:18", "remaining_time": "0:19:44", "throughput": "2471.43", "total_tokens": 44592}
|
||||
{"current_steps": 18, "total_steps": 1000, "loss": 1.7853, "learning_rate": 1.8e-05, "epoch": 0.032, "percentage": 1.8, "cur_time": "2024-08-21 06:00:20", "elapsed_time": "0:00:21", "remaining_time": "0:19:30", "throughput": "2442.25", "total_tokens": 52400}
|
||||
{"current_steps": 21, "total_steps": 1000, "loss": 1.5861, "learning_rate": 2.1e-05, "epoch": 0.037333333333333336, "percentage": 2.1, "cur_time": "2024-08-21 06:00:23", "elapsed_time": "0:00:24", "remaining_time": "0:19:16", "throughput": "2432.12", "total_tokens": 60320}
|
||||
{"current_steps": 24, "total_steps": 1000, "loss": 2.0417, "learning_rate": 2.4e-05, "epoch": 0.042666666666666665, "percentage": 2.4, "cur_time": "2024-08-21 06:00:27", "elapsed_time": "0:00:28", "remaining_time": "0:19:02", "throughput": "2386.67", "total_tokens": 67024}
|
||||
{"current_steps": 27, "total_steps": 1000, "loss": 1.6384, "learning_rate": 2.7000000000000002e-05, "epoch": 0.048, "percentage": 2.7, "cur_time": "2024-08-21 06:00:30", "elapsed_time": "0:00:31", "remaining_time": "0:18:50", "throughput": "2351.78", "total_tokens": 73776}
|
||||
{"current_steps": 30, "total_steps": 1000, "loss": 1.7021, "learning_rate": 3e-05, "epoch": 0.05333333333333334, "percentage": 3.0, "cur_time": "2024-08-21 06:00:33", "elapsed_time": "0:00:34", "remaining_time": "0:18:48", "throughput": "2366.23", "total_tokens": 82592}
|
||||
{"current_steps": 33, "total_steps": 1000, "loss": 1.6026, "learning_rate": 3.3e-05, "epoch": 0.058666666666666666, "percentage": 3.3, "cur_time": "2024-08-21 06:00:37", "elapsed_time": "0:00:38", "remaining_time": "0:18:43", "throughput": "2361.53", "total_tokens": 90512}
|
||||
{"current_steps": 36, "total_steps": 1000, "loss": 1.5229, "learning_rate": 3.6e-05, "epoch": 0.064, "percentage": 3.6, "cur_time": "2024-08-21 06:00:40", "elapsed_time": "0:00:41", "remaining_time": "0:18:39", "throughput": "2315.68", "total_tokens": 96848}
|
||||
{"current_steps": 39, "total_steps": 1000, "loss": 1.3826, "learning_rate": 3.9000000000000006e-05, "epoch": 0.06933333333333333, "percentage": 3.9, "cur_time": "2024-08-21 06:00:45", "elapsed_time": "0:00:46", "remaining_time": "0:19:10", "throughput": "2221.16", "total_tokens": 103728}
|
||||
{"current_steps": 42, "total_steps": 1000, "loss": 1.348, "learning_rate": 4.2e-05, "epoch": 0.07466666666666667, "percentage": 4.2, "cur_time": "2024-08-21 06:00:49", "elapsed_time": "0:00:51", "remaining_time": "0:19:23", "throughput": "2198.53", "total_tokens": 112160}
|
||||
{"current_steps": 45, "total_steps": 1000, "loss": 1.5667, "learning_rate": 4.5e-05, "epoch": 0.08, "percentage": 4.5, "cur_time": "2024-08-21 06:00:53", "elapsed_time": "0:00:54", "remaining_time": "0:19:12", "throughput": "2173.18", "total_tokens": 117984}
|
||||
{"current_steps": 48, "total_steps": 1000, "loss": 1.3849, "learning_rate": 4.8e-05, "epoch": 0.08533333333333333, "percentage": 4.8, "cur_time": "2024-08-21 06:00:56", "elapsed_time": "0:00:57", "remaining_time": "0:19:05", "throughput": "2192.49", "total_tokens": 126624}
|
||||
{"current_steps": 51, "total_steps": 1000, "loss": 1.578, "learning_rate": 5.1000000000000006e-05, "epoch": 0.09066666666666667, "percentage": 5.1, "cur_time": "2024-08-21 06:01:00", "elapsed_time": "0:01:01", "remaining_time": "0:18:57", "throughput": "2200.50", "total_tokens": 134496}
|
||||
{"current_steps": 54, "total_steps": 1000, "loss": 1.6441, "learning_rate": 5.4000000000000005e-05, "epoch": 0.096, "percentage": 5.4, "cur_time": "2024-08-21 06:01:03", "elapsed_time": "0:01:04", "remaining_time": "0:18:49", "throughput": "2196.67", "total_tokens": 141664}
|
||||
{"current_steps": 57, "total_steps": 1000, "loss": 1.3386, "learning_rate": 5.6999999999999996e-05, "epoch": 0.10133333333333333, "percentage": 5.7, "cur_time": "2024-08-21 06:01:06", "elapsed_time": "0:01:07", "remaining_time": "0:18:43", "throughput": "2199.84", "total_tokens": 149360}
|
||||
{"current_steps": 60, "total_steps": 1000, "loss": 1.3779, "learning_rate": 6e-05, "epoch": 0.10666666666666667, "percentage": 6.0, "cur_time": "2024-08-21 06:01:10", "elapsed_time": "0:01:11", "remaining_time": "0:18:37", "throughput": "2212.46", "total_tokens": 157776}
|
||||
{"current_steps": 63, "total_steps": 1000, "loss": 1.4769, "learning_rate": 6.3e-05, "epoch": 0.112, "percentage": 6.3, "cur_time": "2024-08-21 06:01:13", "elapsed_time": "0:01:14", "remaining_time": "0:18:30", "throughput": "2213.79", "total_tokens": 165360}
|
||||
{"current_steps": 66, "total_steps": 1000, "loss": 1.396, "learning_rate": 6.6e-05, "epoch": 0.11733333333333333, "percentage": 6.6, "cur_time": "2024-08-21 06:01:16", "elapsed_time": "0:01:18", "remaining_time": "0:18:24", "throughput": "2217.83", "total_tokens": 173152}
|
||||
{"current_steps": 69, "total_steps": 1000, "loss": 1.5464, "learning_rate": 6.9e-05, "epoch": 0.12266666666666666, "percentage": 6.9, "cur_time": "2024-08-21 06:01:20", "elapsed_time": "0:01:21", "remaining_time": "0:18:19", "throughput": "2226.41", "total_tokens": 181504}
|
||||
{"current_steps": 72, "total_steps": 1000, "loss": 1.4307, "learning_rate": 7.2e-05, "epoch": 0.128, "percentage": 7.2, "cur_time": "2024-08-21 06:01:23", "elapsed_time": "0:01:24", "remaining_time": "0:18:14", "throughput": "2224.81", "total_tokens": 188880}
|
||||
{"current_steps": 75, "total_steps": 1000, "loss": 1.4183, "learning_rate": 7.500000000000001e-05, "epoch": 0.13333333333333333, "percentage": 7.5, "cur_time": "2024-08-21 06:01:27", "elapsed_time": "0:01:28", "remaining_time": "0:18:09", "throughput": "2219.07", "total_tokens": 195952}
|
||||
{"current_steps": 78, "total_steps": 1000, "loss": 1.2132, "learning_rate": 7.800000000000001e-05, "epoch": 0.13866666666666666, "percentage": 7.8, "cur_time": "2024-08-21 06:01:30", "elapsed_time": "0:01:31", "remaining_time": "0:18:03", "throughput": "2223.00", "total_tokens": 203744}
|
||||
{"current_steps": 81, "total_steps": 1000, "loss": 1.1731, "learning_rate": 8.1e-05, "epoch": 0.144, "percentage": 8.1, "cur_time": "2024-08-21 06:01:33", "elapsed_time": "0:01:35", "remaining_time": "0:17:58", "throughput": "2236.46", "total_tokens": 212624}
|
||||
{"current_steps": 84, "total_steps": 1000, "loss": 1.528, "learning_rate": 8.4e-05, "epoch": 0.14933333333333335, "percentage": 8.4, "cur_time": "2024-08-21 06:01:37", "elapsed_time": "0:01:38", "remaining_time": "0:17:53", "throughput": "2233.87", "total_tokens": 219968}
|
||||
{"current_steps": 87, "total_steps": 1000, "loss": 1.275, "learning_rate": 8.7e-05, "epoch": 0.15466666666666667, "percentage": 8.7, "cur_time": "2024-08-21 06:01:40", "elapsed_time": "0:01:41", "remaining_time": "0:17:49", "throughput": "2247.91", "total_tokens": 229024}
|
||||
{"current_steps": 90, "total_steps": 1000, "loss": 1.2145, "learning_rate": 9e-05, "epoch": 0.16, "percentage": 9.0, "cur_time": "2024-08-21 06:01:44", "elapsed_time": "0:01:45", "remaining_time": "0:17:44", "throughput": "2248.82", "total_tokens": 236768}
|
||||
{"current_steps": 93, "total_steps": 1000, "loss": 1.3413, "learning_rate": 9.300000000000001e-05, "epoch": 0.16533333333333333, "percentage": 9.3, "cur_time": "2024-08-21 06:01:47", "elapsed_time": "0:01:48", "remaining_time": "0:17:39", "throughput": "2256.61", "total_tokens": 245152}
|
||||
{"current_steps": 96, "total_steps": 1000, "loss": 1.5684, "learning_rate": 9.6e-05, "epoch": 0.17066666666666666, "percentage": 9.6, "cur_time": "2024-08-21 06:01:50", "elapsed_time": "0:01:51", "remaining_time": "0:17:34", "throughput": "2248.43", "total_tokens": 251728}
|
||||
{"current_steps": 99, "total_steps": 1000, "loss": 1.3223, "learning_rate": 9.900000000000001e-05, "epoch": 0.176, "percentage": 9.9, "cur_time": "2024-08-21 06:01:54", "elapsed_time": "0:01:55", "remaining_time": "0:17:31", "throughput": "2258.54", "total_tokens": 260832}
|
||||
{"current_steps": 102, "total_steps": 1000, "loss": 1.3921, "learning_rate": 9.999878153526974e-05, "epoch": 0.18133333333333335, "percentage": 10.2, "cur_time": "2024-08-21 06:01:58", "elapsed_time": "0:01:59", "remaining_time": "0:17:29", "throughput": "2252.61", "total_tokens": 268448}
|
||||
{"current_steps": 105, "total_steps": 1000, "loss": 1.258, "learning_rate": 9.999238475781957e-05, "epoch": 0.18666666666666668, "percentage": 10.5, "cur_time": "2024-08-21 06:02:01", "elapsed_time": "0:02:02", "remaining_time": "0:17:25", "throughput": "2250.72", "total_tokens": 276016}
|
||||
{"current_steps": 108, "total_steps": 1000, "loss": 1.3191, "learning_rate": 9.998050575201771e-05, "epoch": 0.192, "percentage": 10.8, "cur_time": "2024-08-21 06:02:04", "elapsed_time": "0:02:05", "remaining_time": "0:17:20", "throughput": "2241.79", "total_tokens": 282416}
|
||||
{"current_steps": 111, "total_steps": 1000, "loss": 1.4481, "learning_rate": 9.996314582053106e-05, "epoch": 0.19733333333333333, "percentage": 11.1, "cur_time": "2024-08-21 06:02:08", "elapsed_time": "0:02:09", "remaining_time": "0:17:15", "throughput": "2242.16", "total_tokens": 290016}
|
||||
{"current_steps": 114, "total_steps": 1000, "loss": 1.5764, "learning_rate": 9.99403068670717e-05, "epoch": 0.20266666666666666, "percentage": 11.4, "cur_time": "2024-08-21 06:02:11", "elapsed_time": "0:02:12", "remaining_time": "0:17:12", "throughput": "2243.63", "total_tokens": 297952}
|
||||
{"current_steps": 117, "total_steps": 1000, "loss": 1.3542, "learning_rate": 9.991199139618827e-05, "epoch": 0.208, "percentage": 11.7, "cur_time": "2024-08-21 06:02:15", "elapsed_time": "0:02:16", "remaining_time": "0:17:07", "throughput": "2241.86", "total_tokens": 305152}
|
||||
{"current_steps": 120, "total_steps": 1000, "loss": 1.2433, "learning_rate": 9.987820251299122e-05, "epoch": 0.21333333333333335, "percentage": 12.0, "cur_time": "2024-08-21 06:02:18", "elapsed_time": "0:02:19", "remaining_time": "0:17:02", "throughput": "2242.24", "total_tokens": 312592}
|
||||
{"current_steps": 123, "total_steps": 1000, "loss": 1.2361, "learning_rate": 9.983894392281237e-05, "epoch": 0.21866666666666668, "percentage": 12.3, "cur_time": "2024-08-21 06:02:21", "elapsed_time": "0:02:22", "remaining_time": "0:16:57", "throughput": "2240.09", "total_tokens": 319792}
|
||||
{"current_steps": 126, "total_steps": 1000, "loss": 1.4045, "learning_rate": 9.979421993079852e-05, "epoch": 0.224, "percentage": 12.6, "cur_time": "2024-08-21 06:02:24", "elapsed_time": "0:02:26", "remaining_time": "0:16:53", "throughput": "2243.89", "total_tokens": 327776}
|
||||
{"current_steps": 129, "total_steps": 1000, "loss": 1.317, "learning_rate": 9.974403544143941e-05, "epoch": 0.22933333333333333, "percentage": 12.9, "cur_time": "2024-08-21 06:02:28", "elapsed_time": "0:02:29", "remaining_time": "0:16:49", "throughput": "2249.12", "total_tokens": 336432}
|
||||
{"current_steps": 132, "total_steps": 1000, "loss": 1.3257, "learning_rate": 9.968839595802982e-05, "epoch": 0.23466666666666666, "percentage": 13.2, "cur_time": "2024-08-21 06:02:31", "elapsed_time": "0:02:32", "remaining_time": "0:16:45", "throughput": "2244.39", "total_tokens": 343216}
|
||||
{"current_steps": 135, "total_steps": 1000, "loss": 1.2573, "learning_rate": 9.962730758206611e-05, "epoch": 0.24, "percentage": 13.5, "cur_time": "2024-08-21 06:02:35", "elapsed_time": "0:02:36", "remaining_time": "0:16:40", "throughput": "2241.14", "total_tokens": 350096}
|
||||
{"current_steps": 138, "total_steps": 1000, "loss": 1.3345, "learning_rate": 9.956077701257709e-05, "epoch": 0.24533333333333332, "percentage": 13.8, "cur_time": "2024-08-21 06:02:38", "elapsed_time": "0:02:39", "remaining_time": "0:16:37", "throughput": "2248.14", "total_tokens": 359040}
|
||||
{"current_steps": 141, "total_steps": 1000, "loss": 1.2392, "learning_rate": 9.948881154538945e-05, "epoch": 0.25066666666666665, "percentage": 14.1, "cur_time": "2024-08-21 06:02:42", "elapsed_time": "0:02:43", "remaining_time": "0:16:33", "throughput": "2252.99", "total_tokens": 367568}
|
||||
{"current_steps": 144, "total_steps": 1000, "loss": 1.2553, "learning_rate": 9.941141907232765e-05, "epoch": 0.256, "percentage": 14.4, "cur_time": "2024-08-21 06:02:45", "elapsed_time": "0:02:46", "remaining_time": "0:16:29", "throughput": "2255.06", "total_tokens": 375552}
|
||||
{"current_steps": 147, "total_steps": 1000, "loss": 1.3964, "learning_rate": 9.932860808034848e-05, "epoch": 0.2613333333333333, "percentage": 14.7, "cur_time": "2024-08-21 06:02:49", "elapsed_time": "0:02:50", "remaining_time": "0:16:27", "throughput": "2267.09", "total_tokens": 385712}
|
||||
{"current_steps": 150, "total_steps": 1000, "loss": 1.1754, "learning_rate": 9.924038765061042e-05, "epoch": 0.26666666666666666, "percentage": 15.0, "cur_time": "2024-08-21 06:02:52", "elapsed_time": "0:02:53", "remaining_time": "0:16:22", "throughput": "2267.67", "total_tokens": 393344}
|
||||
{"current_steps": 153, "total_steps": 1000, "loss": 1.3174, "learning_rate": 9.914676745747772e-05, "epoch": 0.272, "percentage": 15.3, "cur_time": "2024-08-21 06:02:55", "elapsed_time": "0:02:56", "remaining_time": "0:16:18", "throughput": "2266.30", "total_tokens": 400720}
|
||||
{"current_steps": 156, "total_steps": 1000, "loss": 1.2322, "learning_rate": 9.904775776745958e-05, "epoch": 0.2773333333333333, "percentage": 15.6, "cur_time": "2024-08-21 06:02:59", "elapsed_time": "0:03:00", "remaining_time": "0:16:14", "throughput": "2270.39", "total_tokens": 409056}
|
||||
{"current_steps": 159, "total_steps": 1000, "loss": 1.3971, "learning_rate": 9.894336943808426e-05, "epoch": 0.2826666666666667, "percentage": 15.9, "cur_time": "2024-08-21 06:03:02", "elapsed_time": "0:03:03", "remaining_time": "0:16:10", "throughput": "2267.78", "total_tokens": 416144}
|
||||
{"current_steps": 162, "total_steps": 1000, "loss": 1.1801, "learning_rate": 9.88336139167084e-05, "epoch": 0.288, "percentage": 16.2, "cur_time": "2024-08-21 06:03:05", "elapsed_time": "0:03:06", "remaining_time": "0:16:06", "throughput": "2263.89", "total_tokens": 422880}
|
||||
{"current_steps": 165, "total_steps": 1000, "loss": 1.2315, "learning_rate": 9.871850323926177e-05, "epoch": 0.29333333333333333, "percentage": 16.5, "cur_time": "2024-08-21 06:03:09", "elapsed_time": "0:03:10", "remaining_time": "0:16:01", "throughput": "2259.24", "total_tokens": 429440}
|
||||
{"current_steps": 168, "total_steps": 1000, "loss": 1.2711, "learning_rate": 9.859805002892732e-05, "epoch": 0.2986666666666667, "percentage": 16.8, "cur_time": "2024-08-21 06:03:12", "elapsed_time": "0:03:13", "remaining_time": "0:15:58", "throughput": "2259.37", "total_tokens": 437200}
|
||||
{"current_steps": 171, "total_steps": 1000, "loss": 1.3031, "learning_rate": 9.847226749475695e-05, "epoch": 0.304, "percentage": 17.1, "cur_time": "2024-08-21 06:03:15", "elapsed_time": "0:03:17", "remaining_time": "0:15:55", "throughput": "2265.76", "total_tokens": 446416}
|
||||
{"current_steps": 174, "total_steps": 1000, "loss": 1.4374, "learning_rate": 9.834116943022298e-05, "epoch": 0.30933333333333335, "percentage": 17.4, "cur_time": "2024-08-21 06:03:19", "elapsed_time": "0:03:20", "remaining_time": "0:15:51", "throughput": "2268.25", "total_tokens": 454672}
|
||||
{"current_steps": 177, "total_steps": 1000, "loss": 1.2568, "learning_rate": 9.820477021170551e-05, "epoch": 0.31466666666666665, "percentage": 17.7, "cur_time": "2024-08-21 06:03:22", "elapsed_time": "0:03:23", "remaining_time": "0:15:48", "throughput": "2271.75", "total_tokens": 463216}
|
||||
{"current_steps": 180, "total_steps": 1000, "loss": 1.273, "learning_rate": 9.806308479691595e-05, "epoch": 0.32, "percentage": 18.0, "cur_time": "2024-08-21 06:03:26", "elapsed_time": "0:03:27", "remaining_time": "0:15:44", "throughput": "2277.46", "total_tokens": 472432}
|
||||
{"current_steps": 183, "total_steps": 1000, "loss": 1.2393, "learning_rate": 9.791612872325667e-05, "epoch": 0.3253333333333333, "percentage": 18.3, "cur_time": "2024-08-21 06:03:29", "elapsed_time": "0:03:30", "remaining_time": "0:15:41", "throughput": "2276.93", "total_tokens": 479968}
|
||||
{"current_steps": 186, "total_steps": 1000, "loss": 1.4561, "learning_rate": 9.776391810611718e-05, "epoch": 0.33066666666666666, "percentage": 18.6, "cur_time": "2024-08-21 06:03:33", "elapsed_time": "0:03:34", "remaining_time": "0:15:37", "throughput": "2278.92", "total_tokens": 488112}
|
||||
{"current_steps": 189, "total_steps": 1000, "loss": 1.3804, "learning_rate": 9.760646963710694e-05, "epoch": 0.336, "percentage": 18.9, "cur_time": "2024-08-21 06:03:36", "elapsed_time": "0:03:37", "remaining_time": "0:15:33", "throughput": "2278.26", "total_tokens": 495808}
|
||||
{"current_steps": 192, "total_steps": 1000, "loss": 1.3654, "learning_rate": 9.744380058222483e-05, "epoch": 0.3413333333333333, "percentage": 19.2, "cur_time": "2024-08-21 06:03:39", "elapsed_time": "0:03:41", "remaining_time": "0:15:30", "throughput": "2276.19", "total_tokens": 503216}
|
||||
{"current_steps": 195, "total_steps": 1000, "loss": 1.2941, "learning_rate": 9.727592877996585e-05, "epoch": 0.3466666666666667, "percentage": 19.5, "cur_time": "2024-08-21 06:03:43", "elapsed_time": "0:03:44", "remaining_time": "0:15:26", "throughput": "2278.42", "total_tokens": 511568}
|
||||
{"current_steps": 198, "total_steps": 1000, "loss": 1.5844, "learning_rate": 9.710287263936484e-05, "epoch": 0.352, "percentage": 19.8, "cur_time": "2024-08-21 06:03:46", "elapsed_time": "0:03:47", "remaining_time": "0:15:23", "throughput": "2279.18", "total_tokens": 519536}
|
||||
{"current_steps": 201, "total_steps": 1000, "loss": 1.5176, "learning_rate": 9.69246511379778e-05, "epoch": 0.35733333333333334, "percentage": 20.1, "cur_time": "2024-08-21 06:03:50", "elapsed_time": "0:03:51", "remaining_time": "0:15:20", "throughput": "2273.25", "total_tokens": 526576}
|
||||
{"current_steps": 204, "total_steps": 1000, "loss": 1.3372, "learning_rate": 9.674128381980072e-05, "epoch": 0.3626666666666667, "percentage": 20.4, "cur_time": "2024-08-21 06:03:53", "elapsed_time": "0:03:54", "remaining_time": "0:15:16", "throughput": "2271.97", "total_tokens": 533824}
|
||||
{"current_steps": 207, "total_steps": 1000, "loss": 1.2416, "learning_rate": 9.655279079312642e-05, "epoch": 0.368, "percentage": 20.7, "cur_time": "2024-08-21 06:03:57", "elapsed_time": "0:03:58", "remaining_time": "0:15:13", "throughput": "2271.68", "total_tokens": 541472}
|
||||
{"current_steps": 210, "total_steps": 1000, "loss": 1.2045, "learning_rate": 9.635919272833938e-05, "epoch": 0.37333333333333335, "percentage": 21.0, "cur_time": "2024-08-21 06:04:00", "elapsed_time": "0:04:01", "remaining_time": "0:15:09", "throughput": "2275.26", "total_tokens": 550144}
|
||||
{"current_steps": 213, "total_steps": 1000, "loss": 1.371, "learning_rate": 9.616051085564906e-05, "epoch": 0.37866666666666665, "percentage": 21.3, "cur_time": "2024-08-21 06:04:04", "elapsed_time": "0:04:05", "remaining_time": "0:15:06", "throughput": "2274.79", "total_tokens": 557872}
|
||||
{"current_steps": 216, "total_steps": 1000, "loss": 1.2994, "learning_rate": 9.595676696276172e-05, "epoch": 0.384, "percentage": 21.6, "cur_time": "2024-08-21 06:04:07", "elapsed_time": "0:04:08", "remaining_time": "0:15:02", "throughput": "2268.21", "total_tokens": 563728}
|
||||
{"current_steps": 219, "total_steps": 1000, "loss": 1.3101, "learning_rate": 9.574798339249125e-05, "epoch": 0.3893333333333333, "percentage": 21.9, "cur_time": "2024-08-21 06:04:10", "elapsed_time": "0:04:11", "remaining_time": "0:14:58", "throughput": "2266.45", "total_tokens": 571056}
|
||||
{"current_steps": 222, "total_steps": 1000, "loss": 1.1064, "learning_rate": 9.553418304030886e-05, "epoch": 0.39466666666666667, "percentage": 22.2, "cur_time": "2024-08-21 06:04:14", "elapsed_time": "0:04:15", "remaining_time": "0:14:55", "throughput": "2266.67", "total_tokens": 579184}
|
||||
{"current_steps": 225, "total_steps": 1000, "loss": 1.4372, "learning_rate": 9.53153893518325e-05, "epoch": 0.4, "percentage": 22.5, "cur_time": "2024-08-21 06:04:17", "elapsed_time": "0:04:18", "remaining_time": "0:14:51", "throughput": "2264.15", "total_tokens": 586128}
|
||||
{"current_steps": 228, "total_steps": 1000, "loss": 1.4349, "learning_rate": 9.50916263202557e-05, "epoch": 0.4053333333333333, "percentage": 22.8, "cur_time": "2024-08-21 06:04:21", "elapsed_time": "0:04:22", "remaining_time": "0:14:48", "throughput": "2267.37", "total_tokens": 594848}
|
||||
{"current_steps": 231, "total_steps": 1000, "loss": 1.3169, "learning_rate": 9.486291848371643e-05, "epoch": 0.4106666666666667, "percentage": 23.1, "cur_time": "2024-08-21 06:04:24", "elapsed_time": "0:04:25", "remaining_time": "0:14:44", "throughput": "2267.70", "total_tokens": 602544}
|
||||
{"current_steps": 234, "total_steps": 1000, "loss": 1.3912, "learning_rate": 9.462929092260628e-05, "epoch": 0.416, "percentage": 23.4, "cur_time": "2024-08-21 06:04:28", "elapsed_time": "0:04:29", "remaining_time": "0:14:41", "throughput": "2266.06", "total_tokens": 609904}
|
||||
{"current_steps": 237, "total_steps": 1000, "loss": 1.1353, "learning_rate": 9.439076925682006e-05, "epoch": 0.42133333333333334, "percentage": 23.7, "cur_time": "2024-08-21 06:04:31", "elapsed_time": "0:04:32", "remaining_time": "0:14:37", "throughput": "2269.06", "total_tokens": 618576}
|
||||
{"current_steps": 240, "total_steps": 1000, "loss": 1.3647, "learning_rate": 9.414737964294636e-05, "epoch": 0.4266666666666667, "percentage": 24.0, "cur_time": "2024-08-21 06:04:34", "elapsed_time": "0:04:36", "remaining_time": "0:14:34", "throughput": "2271.01", "total_tokens": 626960}
|
||||
{"current_steps": 243, "total_steps": 1000, "loss": 1.359, "learning_rate": 9.389914877139903e-05, "epoch": 0.432, "percentage": 24.3, "cur_time": "2024-08-21 06:04:38", "elapsed_time": "0:04:39", "remaining_time": "0:14:30", "throughput": "2271.85", "total_tokens": 634896}
|
||||
{"current_steps": 246, "total_steps": 1000, "loss": 1.2965, "learning_rate": 9.364610386349049e-05, "epoch": 0.43733333333333335, "percentage": 24.6, "cur_time": "2024-08-21 06:04:41", "elapsed_time": "0:04:42", "remaining_time": "0:14:26", "throughput": "2272.65", "total_tokens": 642640}
|
||||
{"current_steps": 249, "total_steps": 1000, "loss": 1.2079, "learning_rate": 9.338827266844644e-05, "epoch": 0.44266666666666665, "percentage": 24.9, "cur_time": "2024-08-21 06:04:45", "elapsed_time": "0:04:46", "remaining_time": "0:14:23", "throughput": "2274.33", "total_tokens": 650816}
|
||||
{"current_steps": 252, "total_steps": 1000, "loss": 1.2893, "learning_rate": 9.312568346036288e-05, "epoch": 0.448, "percentage": 25.2, "cur_time": "2024-08-21 06:04:48", "elapsed_time": "0:04:49", "remaining_time": "0:14:19", "throughput": "2275.39", "total_tokens": 658736}
|
||||
{"current_steps": 255, "total_steps": 1000, "loss": 1.3256, "learning_rate": 9.285836503510562e-05, "epoch": 0.4533333333333333, "percentage": 25.5, "cur_time": "2024-08-21 06:04:51", "elapsed_time": "0:04:52", "remaining_time": "0:14:15", "throughput": "2275.29", "total_tokens": 666416}
|
||||
{"current_steps": 258, "total_steps": 1000, "loss": 1.3449, "learning_rate": 9.258634670715238e-05, "epoch": 0.45866666666666667, "percentage": 25.8, "cur_time": "2024-08-21 06:04:55", "elapsed_time": "0:04:56", "remaining_time": "0:14:12", "throughput": "2270.74", "total_tokens": 673312}
|
||||
{"current_steps": 261, "total_steps": 1000, "loss": 1.3915, "learning_rate": 9.230965830637821e-05, "epoch": 0.464, "percentage": 26.1, "cur_time": "2024-08-21 06:04:59", "elapsed_time": "0:05:00", "remaining_time": "0:14:10", "throughput": "2268.48", "total_tokens": 681408}
|
||||
{"current_steps": 264, "total_steps": 1000, "loss": 1.2321, "learning_rate": 9.202833017478422e-05, "epoch": 0.4693333333333333, "percentage": 26.4, "cur_time": "2024-08-21 06:05:03", "elapsed_time": "0:05:04", "remaining_time": "0:14:10", "throughput": "2265.02", "total_tokens": 690608}
|
||||
{"current_steps": 267, "total_steps": 1000, "loss": 1.3235, "learning_rate": 9.174239316317033e-05, "epoch": 0.4746666666666667, "percentage": 26.7, "cur_time": "2024-08-21 06:05:07", "elapsed_time": "0:05:09", "remaining_time": "0:14:08", "throughput": "2260.68", "total_tokens": 698576}
|
||||
{"current_steps": 270, "total_steps": 1000, "loss": 1.1704, "learning_rate": 9.145187862775209e-05, "epoch": 0.48, "percentage": 27.0, "cur_time": "2024-08-21 06:05:11", "elapsed_time": "0:05:13", "remaining_time": "0:14:06", "throughput": "2261.22", "total_tokens": 707808}
|
||||
{"current_steps": 273, "total_steps": 1000, "loss": 1.234, "learning_rate": 9.11568184267221e-05, "epoch": 0.48533333333333334, "percentage": 27.3, "cur_time": "2024-08-21 06:05:15", "elapsed_time": "0:05:16", "remaining_time": "0:14:02", "throughput": "2255.55", "total_tokens": 714016}
|
||||
{"current_steps": 276, "total_steps": 1000, "loss": 1.2134, "learning_rate": 9.085724491675642e-05, "epoch": 0.49066666666666664, "percentage": 27.6, "cur_time": "2024-08-21 06:05:19", "elapsed_time": "0:05:20", "remaining_time": "0:13:59", "throughput": "2253.31", "total_tokens": 721552}
|
||||
{"current_steps": 279, "total_steps": 1000, "loss": 1.3759, "learning_rate": 9.055319094946633e-05, "epoch": 0.496, "percentage": 27.9, "cur_time": "2024-08-21 06:05:22", "elapsed_time": "0:05:23", "remaining_time": "0:13:57", "throughput": "2250.71", "total_tokens": 729104}
|
||||
{"current_steps": 282, "total_steps": 1000, "loss": 1.3744, "learning_rate": 9.02446898677957e-05, "epoch": 0.5013333333333333, "percentage": 28.2, "cur_time": "2024-08-21 06:05:26", "elapsed_time": "0:05:27", "remaining_time": "0:13:53", "throughput": "2248.59", "total_tokens": 736496}
|
||||
{"current_steps": 285, "total_steps": 1000, "loss": 1.242, "learning_rate": 8.993177550236464e-05, "epoch": 0.5066666666666667, "percentage": 28.5, "cur_time": "2024-08-21 06:05:30", "elapsed_time": "0:05:31", "remaining_time": "0:13:51", "throughput": "2248.72", "total_tokens": 745184}
|
||||
{"current_steps": 288, "total_steps": 1000, "loss": 1.2122, "learning_rate": 8.961448216775954e-05, "epoch": 0.512, "percentage": 28.8, "cur_time": "2024-08-21 06:05:33", "elapsed_time": "0:05:34", "remaining_time": "0:13:48", "throughput": "2245.35", "total_tokens": 752048}
|
||||
{"current_steps": 291, "total_steps": 1000, "loss": 1.2173, "learning_rate": 8.92928446587701e-05, "epoch": 0.5173333333333333, "percentage": 29.1, "cur_time": "2024-08-21 06:05:37", "elapsed_time": "0:05:38", "remaining_time": "0:13:45", "throughput": "2246.96", "total_tokens": 761088}
|
||||
{"current_steps": 294, "total_steps": 1000, "loss": 1.2942, "learning_rate": 8.896689824657372e-05, "epoch": 0.5226666666666666, "percentage": 29.4, "cur_time": "2024-08-21 06:05:41", "elapsed_time": "0:05:42", "remaining_time": "0:13:41", "throughput": "2243.85", "total_tokens": 768080}
|
||||
{"current_steps": 297, "total_steps": 1000, "loss": 1.2633, "learning_rate": 8.863667867486756e-05, "epoch": 0.528, "percentage": 29.7, "cur_time": "2024-08-21 06:05:45", "elapsed_time": "0:05:46", "remaining_time": "0:13:39", "throughput": "2246.14", "total_tokens": 777808}
|
||||
{"current_steps": 300, "total_steps": 1000, "loss": 1.3964, "learning_rate": 8.83022221559489e-05, "epoch": 0.5333333333333333, "percentage": 30.0, "cur_time": "2024-08-21 06:05:49", "elapsed_time": "0:05:50", "remaining_time": "0:13:36", "throughput": "2245.19", "total_tokens": 786032}
|
||||
{"current_steps": 303, "total_steps": 1000, "loss": 1.2115, "learning_rate": 8.796356536674403e-05, "epoch": 0.5386666666666666, "percentage": 30.3, "cur_time": "2024-08-21 06:05:52", "elapsed_time": "0:05:54", "remaining_time": "0:13:34", "throughput": "2240.99", "total_tokens": 793424}
|
||||
{"current_steps": 306, "total_steps": 1000, "loss": 1.1753, "learning_rate": 8.762074544478623e-05, "epoch": 0.544, "percentage": 30.6, "cur_time": "2024-08-21 06:05:56", "elapsed_time": "0:05:57", "remaining_time": "0:13:31", "throughput": "2240.09", "total_tokens": 801392}
|
||||
{"current_steps": 309, "total_steps": 1000, "loss": 1.1468, "learning_rate": 8.727379998414311e-05, "epoch": 0.5493333333333333, "percentage": 30.9, "cur_time": "2024-08-21 06:06:00", "elapsed_time": "0:06:01", "remaining_time": "0:13:28", "throughput": "2238.64", "total_tokens": 809184}
|
||||
{"current_steps": 312, "total_steps": 1000, "loss": 1.359, "learning_rate": 8.692276703129421e-05, "epoch": 0.5546666666666666, "percentage": 31.2, "cur_time": "2024-08-21 06:06:03", "elapsed_time": "0:06:05", "remaining_time": "0:13:25", "throughput": "2237.70", "total_tokens": 816896}
|
||||
{"current_steps": 315, "total_steps": 1000, "loss": 1.3192, "learning_rate": 8.656768508095853e-05, "epoch": 0.56, "percentage": 31.5, "cur_time": "2024-08-21 06:06:07", "elapsed_time": "0:06:08", "remaining_time": "0:13:21", "throughput": "2239.76", "total_tokens": 825952}
|
||||
{"current_steps": 318, "total_steps": 1000, "loss": 1.2414, "learning_rate": 8.620859307187339e-05, "epoch": 0.5653333333333334, "percentage": 31.8, "cur_time": "2024-08-21 06:06:11", "elapsed_time": "0:06:12", "remaining_time": "0:13:19", "throughput": "2242.68", "total_tokens": 835664}
|
||||
{"current_steps": 321, "total_steps": 1000, "loss": 1.2118, "learning_rate": 8.584553038252414e-05, "epoch": 0.5706666666666667, "percentage": 32.1, "cur_time": "2024-08-21 06:06:15", "elapsed_time": "0:06:16", "remaining_time": "0:13:16", "throughput": "2243.10", "total_tokens": 844288}
|
||||
{"current_steps": 324, "total_steps": 1000, "loss": 1.3021, "learning_rate": 8.547853682682604e-05, "epoch": 0.576, "percentage": 32.4, "cur_time": "2024-08-21 06:06:18", "elapsed_time": "0:06:20", "remaining_time": "0:13:12", "throughput": "2240.97", "total_tokens": 851584}
|
||||
{"current_steps": 327, "total_steps": 1000, "loss": 1.3737, "learning_rate": 8.510765264975813e-05, "epoch": 0.5813333333333334, "percentage": 32.7, "cur_time": "2024-08-21 06:06:22", "elapsed_time": "0:06:23", "remaining_time": "0:13:10", "throughput": "2242.99", "total_tokens": 861216}
|
||||
{"current_steps": 330, "total_steps": 1000, "loss": 1.3771, "learning_rate": 8.473291852294987e-05, "epoch": 0.5866666666666667, "percentage": 33.0, "cur_time": "2024-08-21 06:06:26", "elapsed_time": "0:06:27", "remaining_time": "0:13:07", "throughput": "2243.71", "total_tokens": 869792}
|
||||
{"current_steps": 333, "total_steps": 1000, "loss": 1.1644, "learning_rate": 8.435437554022115e-05, "epoch": 0.592, "percentage": 33.3, "cur_time": "2024-08-21 06:06:30", "elapsed_time": "0:06:31", "remaining_time": "0:13:03", "throughput": "2241.92", "total_tokens": 877216}
|
||||
{"current_steps": 336, "total_steps": 1000, "loss": 1.2409, "learning_rate": 8.397206521307584e-05, "epoch": 0.5973333333333334, "percentage": 33.6, "cur_time": "2024-08-21 06:06:33", "elapsed_time": "0:06:34", "remaining_time": "0:13:00", "throughput": "2237.88", "total_tokens": 883488}
|
||||
{"current_steps": 339, "total_steps": 1000, "loss": 1.1997, "learning_rate": 8.358602946614951e-05, "epoch": 0.6026666666666667, "percentage": 33.9, "cur_time": "2024-08-21 06:06:37", "elapsed_time": "0:06:38", "remaining_time": "0:12:56", "throughput": "2235.83", "total_tokens": 890816}
|
||||
{"current_steps": 342, "total_steps": 1000, "loss": 1.2954, "learning_rate": 8.319631063261209e-05, "epoch": 0.608, "percentage": 34.2, "cur_time": "2024-08-21 06:06:41", "elapsed_time": "0:06:42", "remaining_time": "0:12:54", "throughput": "2236.99", "total_tokens": 900352}
|
||||
{"current_steps": 345, "total_steps": 1000, "loss": 1.187, "learning_rate": 8.280295144952536e-05, "epoch": 0.6133333333333333, "percentage": 34.5, "cur_time": "2024-08-21 06:06:45", "elapsed_time": "0:06:46", "remaining_time": "0:12:51", "throughput": "2238.08", "total_tokens": 909312}
|
||||
{"current_steps": 348, "total_steps": 1000, "loss": 1.1707, "learning_rate": 8.240599505315655e-05, "epoch": 0.6186666666666667, "percentage": 34.8, "cur_time": "2024-08-21 06:06:48", "elapsed_time": "0:06:49", "remaining_time": "0:12:48", "throughput": "2237.19", "total_tokens": 917168}
|
||||
{"current_steps": 351, "total_steps": 1000, "loss": 1.2333, "learning_rate": 8.200548497424778e-05, "epoch": 0.624, "percentage": 35.1, "cur_time": "2024-08-21 06:06:52", "elapsed_time": "0:06:53", "remaining_time": "0:12:44", "throughput": "2233.48", "total_tokens": 923424}
|
||||
{"current_steps": 354, "total_steps": 1000, "loss": 1.3041, "learning_rate": 8.160146513324254e-05, "epoch": 0.6293333333333333, "percentage": 35.4, "cur_time": "2024-08-21 06:06:56", "elapsed_time": "0:06:57", "remaining_time": "0:12:41", "throughput": "2232.52", "total_tokens": 931152}
|
||||
{"current_steps": 357, "total_steps": 1000, "loss": 1.23, "learning_rate": 8.119397983546932e-05, "epoch": 0.6346666666666667, "percentage": 35.7, "cur_time": "2024-08-21 06:06:59", "elapsed_time": "0:07:00", "remaining_time": "0:12:38", "throughput": "2235.39", "total_tokens": 940992}
|
||||
{"current_steps": 360, "total_steps": 1000, "loss": 1.3671, "learning_rate": 8.07830737662829e-05, "epoch": 0.64, "percentage": 36.0, "cur_time": "2024-08-21 06:07:03", "elapsed_time": "0:07:04", "remaining_time": "0:12:35", "throughput": "2236.61", "total_tokens": 949936}
|
||||
{"current_steps": 363, "total_steps": 1000, "loss": 1.3496, "learning_rate": 8.036879198616434e-05, "epoch": 0.6453333333333333, "percentage": 36.3, "cur_time": "2024-08-21 06:07:07", "elapsed_time": "0:07:08", "remaining_time": "0:12:31", "throughput": "2236.71", "total_tokens": 958432}
|
||||
{"current_steps": 366, "total_steps": 1000, "loss": 1.1982, "learning_rate": 7.99511799257793e-05, "epoch": 0.6506666666666666, "percentage": 36.6, "cur_time": "2024-08-21 06:07:11", "elapsed_time": "0:07:12", "remaining_time": "0:12:28", "throughput": "2236.97", "total_tokens": 966688}
|
||||
{"current_steps": 369, "total_steps": 1000, "loss": 1.1809, "learning_rate": 7.953028338099627e-05, "epoch": 0.656, "percentage": 36.9, "cur_time": "2024-08-21 06:07:14", "elapsed_time": "0:07:15", "remaining_time": "0:12:24", "throughput": "2235.37", "total_tokens": 973872}
|
||||
{"current_steps": 372, "total_steps": 1000, "loss": 1.3528, "learning_rate": 7.910614850786448e-05, "epoch": 0.6613333333333333, "percentage": 37.2, "cur_time": "2024-08-21 06:07:18", "elapsed_time": "0:07:19", "remaining_time": "0:12:21", "throughput": "2233.65", "total_tokens": 981264}
|
||||
{"current_steps": 375, "total_steps": 1000, "loss": 1.4088, "learning_rate": 7.86788218175523e-05, "epoch": 0.6666666666666666, "percentage": 37.5, "cur_time": "2024-08-21 06:07:21", "elapsed_time": "0:07:22", "remaining_time": "0:12:18", "throughput": "2232.25", "total_tokens": 988800}
|
||||
{"current_steps": 378, "total_steps": 1000, "loss": 1.4252, "learning_rate": 7.82483501712469e-05, "epoch": 0.672, "percentage": 37.8, "cur_time": "2024-08-21 06:07:25", "elapsed_time": "0:07:26", "remaining_time": "0:12:14", "throughput": "2231.68", "total_tokens": 996656}
|
||||
{"current_steps": 381, "total_steps": 1000, "loss": 1.2573, "learning_rate": 7.781478077501525e-05, "epoch": 0.6773333333333333, "percentage": 38.1, "cur_time": "2024-08-21 06:07:29", "elapsed_time": "0:07:30", "remaining_time": "0:12:11", "throughput": "2231.36", "total_tokens": 1004672}
|
||||
{"current_steps": 384, "total_steps": 1000, "loss": 1.2001, "learning_rate": 7.737816117462752e-05, "epoch": 0.6826666666666666, "percentage": 38.4, "cur_time": "2024-08-21 06:07:32", "elapsed_time": "0:07:33", "remaining_time": "0:12:08", "throughput": "2230.78", "total_tokens": 1012400}
|
||||
{"current_steps": 387, "total_steps": 1000, "loss": 1.2168, "learning_rate": 7.693853925034315e-05, "epoch": 0.688, "percentage": 38.7, "cur_time": "2024-08-21 06:07:36", "elapsed_time": "0:07:37", "remaining_time": "0:12:04", "throughput": "2227.06", "total_tokens": 1018768}
|
||||
{"current_steps": 390, "total_steps": 1000, "loss": 1.3135, "learning_rate": 7.649596321166024e-05, "epoch": 0.6933333333333334, "percentage": 39.0, "cur_time": "2024-08-21 06:07:40", "elapsed_time": "0:07:41", "remaining_time": "0:12:01", "throughput": "2225.69", "total_tokens": 1026368}
|
||||
{"current_steps": 393, "total_steps": 1000, "loss": 1.3444, "learning_rate": 7.605048159202883e-05, "epoch": 0.6986666666666667, "percentage": 39.3, "cur_time": "2024-08-21 06:07:43", "elapsed_time": "0:07:44", "remaining_time": "0:11:57", "throughput": "2224.25", "total_tokens": 1033664}
|
||||
{"current_steps": 396, "total_steps": 1000, "loss": 1.2206, "learning_rate": 7.560214324352858e-05, "epoch": 0.704, "percentage": 39.6, "cur_time": "2024-08-21 06:07:47", "elapsed_time": "0:07:48", "remaining_time": "0:11:54", "throughput": "2224.91", "total_tokens": 1042560}
|
||||
{"current_steps": 399, "total_steps": 1000, "loss": 1.306, "learning_rate": 7.515099733151177e-05, "epoch": 0.7093333333333334, "percentage": 39.9, "cur_time": "2024-08-21 06:07:51", "elapsed_time": "0:07:52", "remaining_time": "0:11:51", "throughput": "2224.10", "total_tokens": 1050336}
|
||||
{"current_steps": 402, "total_steps": 1000, "loss": 1.3917, "learning_rate": 7.469709332921155e-05, "epoch": 0.7146666666666667, "percentage": 40.2, "cur_time": "2024-08-21 06:07:55", "elapsed_time": "0:07:56", "remaining_time": "0:11:48", "throughput": "2220.80", "total_tokens": 1057824}
|
||||
{"current_steps": 405, "total_steps": 1000, "loss": 1.4311, "learning_rate": 7.424048101231686e-05, "epoch": 0.72, "percentage": 40.5, "cur_time": "2024-08-21 06:07:58", "elapsed_time": "0:08:00", "remaining_time": "0:11:45", "throughput": "2218.84", "total_tokens": 1065184}
|
||||
{"current_steps": 408, "total_steps": 1000, "loss": 1.3075, "learning_rate": 7.378121045351378e-05, "epoch": 0.7253333333333334, "percentage": 40.8, "cur_time": "2024-08-21 06:08:02", "elapsed_time": "0:08:03", "remaining_time": "0:11:41", "throughput": "2216.67", "total_tokens": 1072016}
|
||||
{"current_steps": 411, "total_steps": 1000, "loss": 1.2019, "learning_rate": 7.331933201699457e-05, "epoch": 0.7306666666666667, "percentage": 41.1, "cur_time": "2024-08-21 06:08:06", "elapsed_time": "0:08:07", "remaining_time": "0:11:38", "throughput": "2215.50", "total_tokens": 1079584}
|
||||
{"current_steps": 414, "total_steps": 1000, "loss": 1.1311, "learning_rate": 7.285489635293472e-05, "epoch": 0.736, "percentage": 41.4, "cur_time": "2024-08-21 06:08:09", "elapsed_time": "0:08:10", "remaining_time": "0:11:34", "throughput": "2215.87", "total_tokens": 1087984}
|
||||
{"current_steps": 417, "total_steps": 1000, "loss": 1.1931, "learning_rate": 7.238795439193848e-05, "epoch": 0.7413333333333333, "percentage": 41.7, "cur_time": "2024-08-21 06:08:13", "elapsed_time": "0:08:14", "remaining_time": "0:11:31", "throughput": "2215.99", "total_tokens": 1096032}
|
||||
{"current_steps": 420, "total_steps": 1000, "loss": 1.1565, "learning_rate": 7.191855733945387e-05, "epoch": 0.7466666666666667, "percentage": 42.0, "cur_time": "2024-08-21 06:08:17", "elapsed_time": "0:08:18", "remaining_time": "0:11:28", "throughput": "2217.98", "total_tokens": 1105696}
|
||||
{"current_steps": 423, "total_steps": 1000, "loss": 1.2883, "learning_rate": 7.14467566701573e-05, "epoch": 0.752, "percentage": 42.3, "cur_time": "2024-08-21 06:08:21", "elapsed_time": "0:08:22", "remaining_time": "0:11:25", "throughput": "2218.09", "total_tokens": 1114048}
|
||||
{"current_steps": 426, "total_steps": 1000, "loss": 1.4102, "learning_rate": 7.097260412230886e-05, "epoch": 0.7573333333333333, "percentage": 42.6, "cur_time": "2024-08-21 06:08:24", "elapsed_time": "0:08:25", "remaining_time": "0:11:21", "throughput": "2218.53", "total_tokens": 1122528}
|
||||
{"current_steps": 429, "total_steps": 1000, "loss": 1.2941, "learning_rate": 7.049615169207864e-05, "epoch": 0.7626666666666667, "percentage": 42.9, "cur_time": "2024-08-21 06:08:28", "elapsed_time": "0:08:29", "remaining_time": "0:11:18", "throughput": "2219.43", "total_tokens": 1131328}
|
||||
{"current_steps": 432, "total_steps": 1000, "loss": 1.155, "learning_rate": 7.001745162784477e-05, "epoch": 0.768, "percentage": 43.2, "cur_time": "2024-08-21 06:08:32", "elapsed_time": "0:08:33", "remaining_time": "0:11:14", "throughput": "2218.90", "total_tokens": 1138992}
|
||||
{"current_steps": 435, "total_steps": 1000, "loss": 1.3493, "learning_rate": 6.953655642446368e-05, "epoch": 0.7733333333333333, "percentage": 43.5, "cur_time": "2024-08-21 06:08:36", "elapsed_time": "0:08:37", "remaining_time": "0:11:11", "throughput": "2219.64", "total_tokens": 1147776}
|
||||
{"current_steps": 438, "total_steps": 1000, "loss": 1.2435, "learning_rate": 6.905351881751372e-05, "epoch": 0.7786666666666666, "percentage": 43.8, "cur_time": "2024-08-21 06:08:39", "elapsed_time": "0:08:41", "remaining_time": "0:11:08", "throughput": "2221.29", "total_tokens": 1157360}
|
||||
{"current_steps": 441, "total_steps": 1000, "loss": 1.2651, "learning_rate": 6.856839177751176e-05, "epoch": 0.784, "percentage": 44.1, "cur_time": "2024-08-21 06:08:43", "elapsed_time": "0:08:44", "remaining_time": "0:11:05", "throughput": "2221.71", "total_tokens": 1166256}
|
||||
{"current_steps": 444, "total_steps": 1000, "loss": 1.3279, "learning_rate": 6.808122850410461e-05, "epoch": 0.7893333333333333, "percentage": 44.4, "cur_time": "2024-08-21 06:08:47", "elapsed_time": "0:08:48", "remaining_time": "0:11:01", "throughput": "2220.56", "total_tokens": 1173840}
|
||||
{"current_steps": 447, "total_steps": 1000, "loss": 1.3869, "learning_rate": 6.759208242023509e-05, "epoch": 0.7946666666666666, "percentage": 44.7, "cur_time": "2024-08-21 06:08:51", "elapsed_time": "0:08:52", "remaining_time": "0:10:58", "throughput": "2218.83", "total_tokens": 1180848}
|
||||
{"current_steps": 450, "total_steps": 1000, "loss": 1.31, "learning_rate": 6.710100716628344e-05, "epoch": 0.8, "percentage": 45.0, "cur_time": "2024-08-21 06:08:54", "elapsed_time": "0:08:56", "remaining_time": "0:10:55", "throughput": "2219.87", "total_tokens": 1189952}
|
||||
{"current_steps": 453, "total_steps": 1000, "loss": 1.225, "learning_rate": 6.660805659418516e-05, "epoch": 0.8053333333333333, "percentage": 45.3, "cur_time": "2024-08-21 06:08:58", "elapsed_time": "0:08:59", "remaining_time": "0:10:51", "throughput": "2219.91", "total_tokens": 1198336}
|
||||
{"current_steps": 456, "total_steps": 1000, "loss": 1.2687, "learning_rate": 6.611328476152557e-05, "epoch": 0.8106666666666666, "percentage": 45.6, "cur_time": "2024-08-21 06:09:02", "elapsed_time": "0:09:03", "remaining_time": "0:10:48", "throughput": "2219.75", "total_tokens": 1206304}
|
||||
{"current_steps": 459, "total_steps": 1000, "loss": 1.3772, "learning_rate": 6.561674592561163e-05, "epoch": 0.816, "percentage": 45.9, "cur_time": "2024-08-21 06:09:06", "elapsed_time": "0:09:07", "remaining_time": "0:10:44", "throughput": "2219.57", "total_tokens": 1214480}
|
||||
{"current_steps": 462, "total_steps": 1000, "loss": 1.2164, "learning_rate": 6.511849453752223e-05, "epoch": 0.8213333333333334, "percentage": 46.2, "cur_time": "2024-08-21 06:09:09", "elapsed_time": "0:09:10", "remaining_time": "0:10:41", "throughput": "2219.81", "total_tokens": 1222608}
|
||||
{"current_steps": 465, "total_steps": 1000, "loss": 1.3669, "learning_rate": 6.461858523613684e-05, "epoch": 0.8266666666666667, "percentage": 46.5, "cur_time": "2024-08-21 06:09:13", "elapsed_time": "0:09:14", "remaining_time": "0:10:37", "throughput": "2217.72", "total_tokens": 1229568}
|
||||
{"current_steps": 468, "total_steps": 1000, "loss": 1.2246, "learning_rate": 6.411707284214384e-05, "epoch": 0.832, "percentage": 46.8, "cur_time": "2024-08-21 06:09:16", "elapsed_time": "0:09:17", "remaining_time": "0:10:34", "throughput": "2216.49", "total_tokens": 1236656}
|
||||
{"current_steps": 471, "total_steps": 1000, "loss": 1.3515, "learning_rate": 6.361401235202872e-05, "epoch": 0.8373333333333334, "percentage": 47.1, "cur_time": "2024-08-21 06:09:20", "elapsed_time": "0:09:21", "remaining_time": "0:10:30", "throughput": "2215.09", "total_tokens": 1243776}
|
||||
{"current_steps": 474, "total_steps": 1000, "loss": 1.1246, "learning_rate": 6.310945893204324e-05, "epoch": 0.8426666666666667, "percentage": 47.4, "cur_time": "2024-08-21 06:09:24", "elapsed_time": "0:09:25", "remaining_time": "0:10:27", "throughput": "2216.47", "total_tokens": 1253296}
|
||||
{"current_steps": 477, "total_steps": 1000, "loss": 1.4245, "learning_rate": 6.26034679121557e-05, "epoch": 0.848, "percentage": 47.7, "cur_time": "2024-08-21 06:09:28", "elapsed_time": "0:09:29", "remaining_time": "0:10:24", "throughput": "2216.43", "total_tokens": 1261440}
|
||||
{"current_steps": 480, "total_steps": 1000, "loss": 1.4892, "learning_rate": 6.209609477998338e-05, "epoch": 0.8533333333333334, "percentage": 48.0, "cur_time": "2024-08-21 06:09:31", "elapsed_time": "0:09:32", "remaining_time": "0:10:20", "throughput": "2214.54", "total_tokens": 1268336}
|
||||
{"current_steps": 483, "total_steps": 1000, "loss": 1.4348, "learning_rate": 6.158739517470786e-05, "epoch": 0.8586666666666667, "percentage": 48.3, "cur_time": "2024-08-21 06:09:35", "elapsed_time": "0:09:36", "remaining_time": "0:10:17", "throughput": "2213.97", "total_tokens": 1276224}
|
||||
{"current_steps": 486, "total_steps": 1000, "loss": 1.1242, "learning_rate": 6.107742488097338e-05, "epoch": 0.864, "percentage": 48.6, "cur_time": "2024-08-21 06:09:39", "elapsed_time": "0:09:40", "remaining_time": "0:10:13", "throughput": "2213.30", "total_tokens": 1284112}
|
||||
{"current_steps": 489, "total_steps": 1000, "loss": 1.3794, "learning_rate": 6.056623982276944e-05, "epoch": 0.8693333333333333, "percentage": 48.9, "cur_time": "2024-08-21 06:09:42", "elapsed_time": "0:09:43", "remaining_time": "0:10:10", "throughput": "2213.72", "total_tokens": 1292800}
|
||||
{"current_steps": 492, "total_steps": 1000, "loss": 1.4105, "learning_rate": 6.005389605729824e-05, "epoch": 0.8746666666666667, "percentage": 49.2, "cur_time": "2024-08-21 06:09:46", "elapsed_time": "0:09:47", "remaining_time": "0:10:06", "throughput": "2213.30", "total_tokens": 1300656}
|
||||
{"current_steps": 495, "total_steps": 1000, "loss": 1.2852, "learning_rate": 5.9540449768827246e-05, "epoch": 0.88, "percentage": 49.5, "cur_time": "2024-08-21 06:09:50", "elapsed_time": "0:09:51", "remaining_time": "0:10:03", "throughput": "2214.13", "total_tokens": 1309792}
|
||||
{"current_steps": 498, "total_steps": 1000, "loss": 1.2374, "learning_rate": 5.902595726252801e-05, "epoch": 0.8853333333333333, "percentage": 49.8, "cur_time": "2024-08-21 06:09:54", "elapsed_time": "0:09:55", "remaining_time": "0:09:59", "throughput": "2211.35", "total_tokens": 1316000}
|
||||
{"current_steps": 500, "total_steps": 1000, "eval_loss": 1.275710940361023, "epoch": 0.8888888888888888, "percentage": 50.0, "cur_time": "2024-08-21 06:10:17", "elapsed_time": "0:10:18", "remaining_time": "0:10:18", "throughput": "2136.35", "total_tokens": 1321104}
|
||||
{"current_steps": 501, "total_steps": 1000, "loss": 1.3322, "learning_rate": 5.851047495830163e-05, "epoch": 0.8906666666666667, "percentage": 50.1, "cur_time": "2024-08-21 06:10:18", "elapsed_time": "0:10:19", "remaining_time": "0:10:17", "throughput": "2135.28", "total_tokens": 1323856}
|
||||
{"current_steps": 504, "total_steps": 1000, "loss": 1.4102, "learning_rate": 5.799405938459175e-05, "epoch": 0.896, "percentage": 50.4, "cur_time": "2024-08-21 06:10:22", "elapsed_time": "0:10:23", "remaining_time": "0:10:13", "throughput": "2136.77", "total_tokens": 1332832}
|
||||
{"current_steps": 507, "total_steps": 1000, "loss": 1.2225, "learning_rate": 5.747676717218549e-05, "epoch": 0.9013333333333333, "percentage": 50.7, "cur_time": "2024-08-21 06:10:26", "elapsed_time": "0:10:27", "remaining_time": "0:10:09", "throughput": "2135.31", "total_tokens": 1339504}
|
||||
{"current_steps": 510, "total_steps": 1000, "loss": 1.0999, "learning_rate": 5.695865504800327e-05, "epoch": 0.9066666666666666, "percentage": 51.0, "cur_time": "2024-08-21 06:10:29", "elapsed_time": "0:10:30", "remaining_time": "0:10:06", "throughput": "2135.31", "total_tokens": 1347200}
|
||||
{"current_steps": 513, "total_steps": 1000, "loss": 1.1401, "learning_rate": 5.643977982887815e-05, "epoch": 0.912, "percentage": 51.3, "cur_time": "2024-08-21 06:10:33", "elapsed_time": "0:10:34", "remaining_time": "0:10:02", "throughput": "2134.34", "total_tokens": 1354192}
|
||||
{"current_steps": 516, "total_steps": 1000, "loss": 1.5087, "learning_rate": 5.5920198415325064e-05, "epoch": 0.9173333333333333, "percentage": 51.6, "cur_time": "2024-08-21 06:10:37", "elapsed_time": "0:10:38", "remaining_time": "0:09:58", "throughput": "2135.34", "total_tokens": 1362864}
|
||||
{"current_steps": 519, "total_steps": 1000, "loss": 1.3877, "learning_rate": 5.539996778530115e-05, "epoch": 0.9226666666666666, "percentage": 51.9, "cur_time": "2024-08-21 06:10:40", "elapsed_time": "0:10:42", "remaining_time": "0:09:55", "throughput": "2136.37", "total_tokens": 1371616}
|
||||
{"current_steps": 522, "total_steps": 1000, "loss": 1.2798, "learning_rate": 5.487914498795747e-05, "epoch": 0.928, "percentage": 52.2, "cur_time": "2024-08-21 06:10:44", "elapsed_time": "0:10:45", "remaining_time": "0:09:51", "throughput": "2134.83", "total_tokens": 1378160}
|
||||
{"current_steps": 525, "total_steps": 1000, "loss": 1.2768, "learning_rate": 5.435778713738292e-05, "epoch": 0.9333333333333333, "percentage": 52.5, "cur_time": "2024-08-21 06:10:48", "elapsed_time": "0:10:49", "remaining_time": "0:09:47", "throughput": "2137.53", "total_tokens": 1388624}
|
||||
{"current_steps": 528, "total_steps": 1000, "loss": 1.2289, "learning_rate": 5.383595140634093e-05, "epoch": 0.9386666666666666, "percentage": 52.8, "cur_time": "2024-08-21 06:10:52", "elapsed_time": "0:10:53", "remaining_time": "0:09:44", "throughput": "2137.95", "total_tokens": 1397216}
|
||||
{"current_steps": 531, "total_steps": 1000, "loss": 1.1949, "learning_rate": 5.3313695020000024e-05, "epoch": 0.944, "percentage": 53.1, "cur_time": "2024-08-21 06:10:56", "elapsed_time": "0:10:57", "remaining_time": "0:09:40", "throughput": "2138.77", "total_tokens": 1405616}
|
||||
{"current_steps": 534, "total_steps": 1000, "loss": 1.0639, "learning_rate": 5.279107524965819e-05, "epoch": 0.9493333333333334, "percentage": 53.4, "cur_time": "2024-08-21 06:10:59", "elapsed_time": "0:11:00", "remaining_time": "0:09:36", "throughput": "2139.02", "total_tokens": 1413680}
|
||||
{"current_steps": 537, "total_steps": 1000, "loss": 1.3181, "learning_rate": 5.226814940646269e-05, "epoch": 0.9546666666666667, "percentage": 53.7, "cur_time": "2024-08-21 06:11:03", "elapsed_time": "0:11:04", "remaining_time": "0:09:32", "throughput": "2138.83", "total_tokens": 1421344}
|
||||
{"current_steps": 540, "total_steps": 1000, "loss": 1.2697, "learning_rate": 5.174497483512506e-05, "epoch": 0.96, "percentage": 54.0, "cur_time": "2024-08-21 06:11:07", "elapsed_time": "0:11:08", "remaining_time": "0:09:29", "throughput": "2140.26", "total_tokens": 1430592}
|
||||
{"current_steps": 543, "total_steps": 1000, "loss": 1.2928, "learning_rate": 5.1221608907632665e-05, "epoch": 0.9653333333333334, "percentage": 54.3, "cur_time": "2024-08-21 06:11:11", "elapsed_time": "0:11:12", "remaining_time": "0:09:25", "throughput": "2139.44", "total_tokens": 1437920}
|
||||
{"current_steps": 546, "total_steps": 1000, "loss": 1.3658, "learning_rate": 5.0698109016957274e-05, "epoch": 0.9706666666666667, "percentage": 54.6, "cur_time": "2024-08-21 06:11:14", "elapsed_time": "0:11:15", "remaining_time": "0:09:21", "throughput": "2139.54", "total_tokens": 1445824}
|
||||
{"current_steps": 549, "total_steps": 1000, "loss": 1.1596, "learning_rate": 5.017453257076119e-05, "epoch": 0.976, "percentage": 54.9, "cur_time": "2024-08-21 06:11:18", "elapsed_time": "0:11:19", "remaining_time": "0:09:18", "throughput": "2140.08", "total_tokens": 1453936}
|
||||
{"current_steps": 552, "total_steps": 1000, "loss": 1.3295, "learning_rate": 4.965093698510193e-05, "epoch": 0.9813333333333333, "percentage": 55.2, "cur_time": "2024-08-21 06:11:21", "elapsed_time": "0:11:23", "remaining_time": "0:09:14", "throughput": "2140.22", "total_tokens": 1461840}
|
||||
{"current_steps": 555, "total_steps": 1000, "loss": 1.1869, "learning_rate": 4.912737967813583e-05, "epoch": 0.9866666666666667, "percentage": 55.5, "cur_time": "2024-08-21 06:11:25", "elapsed_time": "0:11:26", "remaining_time": "0:09:10", "throughput": "2139.56", "total_tokens": 1469056}
|
||||
{"current_steps": 558, "total_steps": 1000, "loss": 1.2946, "learning_rate": 4.860391806382157e-05, "epoch": 0.992, "percentage": 55.8, "cur_time": "2024-08-21 06:11:29", "elapsed_time": "0:11:30", "remaining_time": "0:09:06", "throughput": "2138.67", "total_tokens": 1476144}
|
||||
{"current_steps": 561, "total_steps": 1000, "loss": 1.226, "learning_rate": 4.8080609545624004e-05, "epoch": 0.9973333333333333, "percentage": 56.1, "cur_time": "2024-08-21 06:11:32", "elapsed_time": "0:11:33", "remaining_time": "0:09:03", "throughput": "2138.13", "total_tokens": 1483664}
|
||||
{"current_steps": 564, "total_steps": 1000, "loss": 1.3204, "learning_rate": 4.755751151021934e-05, "epoch": 1.0026666666666666, "percentage": 56.4, "cur_time": "2024-08-21 06:11:36", "elapsed_time": "0:11:37", "remaining_time": "0:08:59", "throughput": "2139.41", "total_tokens": 1492768}
|
||||
{"current_steps": 567, "total_steps": 1000, "loss": 1.2531, "learning_rate": 4.703468132120193e-05, "epoch": 1.008, "percentage": 56.7, "cur_time": "2024-08-21 06:11:40", "elapsed_time": "0:11:41", "remaining_time": "0:08:55", "throughput": "2138.57", "total_tokens": 1499904}
|
||||
{"current_steps": 570, "total_steps": 1000, "loss": 1.3849, "learning_rate": 4.6512176312793736e-05, "epoch": 1.0133333333333334, "percentage": 57.0, "cur_time": "2024-08-21 06:11:44", "elapsed_time": "0:11:45", "remaining_time": "0:08:52", "throughput": "2139.08", "total_tokens": 1508704}
|
||||
{"current_steps": 573, "total_steps": 1000, "loss": 1.1251, "learning_rate": 4.599005378355706e-05, "epoch": 1.0186666666666666, "percentage": 57.3, "cur_time": "2024-08-21 06:11:47", "elapsed_time": "0:11:49", "remaining_time": "0:08:48", "throughput": "2139.95", "total_tokens": 1517328}
|
||||
{"current_steps": 576, "total_steps": 1000, "loss": 1.0598, "learning_rate": 4.5468370990111006e-05, "epoch": 1.024, "percentage": 57.6, "cur_time": "2024-08-21 06:11:51", "elapsed_time": "0:11:52", "remaining_time": "0:08:44", "throughput": "2139.25", "total_tokens": 1524384}
|
||||
{"current_steps": 579, "total_steps": 1000, "loss": 1.3841, "learning_rate": 4.494718514085268e-05, "epoch": 1.0293333333333334, "percentage": 57.9, "cur_time": "2024-08-21 06:11:55", "elapsed_time": "0:11:56", "remaining_time": "0:08:40", "throughput": "2139.12", "total_tokens": 1532032}
|
||||
{"current_steps": 582, "total_steps": 1000, "loss": 1.2045, "learning_rate": 4.442655338968373e-05, "epoch": 1.0346666666666666, "percentage": 58.2, "cur_time": "2024-08-21 06:11:58", "elapsed_time": "0:12:00", "remaining_time": "0:08:37", "throughput": "2142.06", "total_tokens": 1542432}
|
||||
{"current_steps": 585, "total_steps": 1000, "loss": 1.1681, "learning_rate": 4.390653282974264e-05, "epoch": 1.04, "percentage": 58.5, "cur_time": "2024-08-21 06:12:02", "elapsed_time": "0:12:03", "remaining_time": "0:08:33", "throughput": "2140.84", "total_tokens": 1549104}
|
||||
{"current_steps": 588, "total_steps": 1000, "loss": 1.1828, "learning_rate": 4.3387180487143876e-05, "epoch": 1.0453333333333332, "percentage": 58.8, "cur_time": "2024-08-21 06:12:06", "elapsed_time": "0:12:07", "remaining_time": "0:08:29", "throughput": "2142.22", "total_tokens": 1558432}
|
||||
{"current_steps": 591, "total_steps": 1000, "loss": 1.3796, "learning_rate": 4.2868553314724425e-05, "epoch": 1.0506666666666666, "percentage": 59.1, "cur_time": "2024-08-21 06:12:10", "elapsed_time": "0:12:11", "remaining_time": "0:08:25", "throughput": "2141.06", "total_tokens": 1565360}
|
||||
{"current_steps": 594, "total_steps": 1000, "loss": 1.3221, "learning_rate": 4.23507081857981e-05, "epoch": 1.056, "percentage": 59.4, "cur_time": "2024-08-21 06:12:13", "elapsed_time": "0:12:14", "remaining_time": "0:08:22", "throughput": "2141.22", "total_tokens": 1573200}
|
||||
{"current_steps": 597, "total_steps": 1000, "loss": 1.2565, "learning_rate": 4.1833701887918904e-05, "epoch": 1.0613333333333332, "percentage": 59.7, "cur_time": "2024-08-21 06:12:17", "elapsed_time": "0:12:18", "remaining_time": "0:08:18", "throughput": "2142.19", "total_tokens": 1581824}
|
||||
{"current_steps": 600, "total_steps": 1000, "loss": 1.1947, "learning_rate": 4.131759111665349e-05, "epoch": 1.0666666666666667, "percentage": 60.0, "cur_time": "2024-08-21 06:12:20", "elapsed_time": "0:12:21", "remaining_time": "0:08:14", "throughput": "2141.68", "total_tokens": 1589088}
|
||||
{"current_steps": 603, "total_steps": 1000, "loss": 1.3702, "learning_rate": 4.080243246936399e-05, "epoch": 1.072, "percentage": 60.3, "cur_time": "2024-08-21 06:12:24", "elapsed_time": "0:12:25", "remaining_time": "0:08:11", "throughput": "2139.94", "total_tokens": 1596080}
|
||||
{"current_steps": 606, "total_steps": 1000, "loss": 1.182, "learning_rate": 4.028828243900141e-05, "epoch": 1.0773333333333333, "percentage": 60.6, "cur_time": "2024-08-21 06:12:28", "elapsed_time": "0:12:29", "remaining_time": "0:08:07", "throughput": "2138.86", "total_tokens": 1602928}
|
||||
{"current_steps": 609, "total_steps": 1000, "loss": 1.3002, "learning_rate": 3.9775197407910485e-05, "epoch": 1.0826666666666667, "percentage": 60.9, "cur_time": "2024-08-21 06:12:32", "elapsed_time": "0:12:33", "remaining_time": "0:08:03", "throughput": "2139.64", "total_tokens": 1611552}
|
||||
{"current_steps": 612, "total_steps": 1000, "loss": 1.361, "learning_rate": 3.926323364164684e-05, "epoch": 1.088, "percentage": 61.2, "cur_time": "2024-08-21 06:12:35", "elapsed_time": "0:12:37", "remaining_time": "0:07:59", "throughput": "2141.55", "total_tokens": 1621312}
|
||||
{"current_steps": 615, "total_steps": 1000, "loss": 1.4355, "learning_rate": 3.875244728280676e-05, "epoch": 1.0933333333333333, "percentage": 61.5, "cur_time": "2024-08-21 06:12:39", "elapsed_time": "0:12:40", "remaining_time": "0:07:56", "throughput": "2142.44", "total_tokens": 1630144}
|
||||
{"current_steps": 618, "total_steps": 1000, "loss": 1.1478, "learning_rate": 3.82428943448705e-05, "epoch": 1.0986666666666667, "percentage": 61.8, "cur_time": "2024-08-21 06:12:43", "elapsed_time": "0:12:44", "remaining_time": "0:07:52", "throughput": "2142.86", "total_tokens": 1638288}
|
||||
{"current_steps": 621, "total_steps": 1000, "loss": 1.1475, "learning_rate": 3.773463070605987e-05, "epoch": 1.104, "percentage": 62.1, "cur_time": "2024-08-21 06:12:46", "elapsed_time": "0:12:47", "remaining_time": "0:07:48", "throughput": "2141.56", "total_tokens": 1644672}
|
||||
{"current_steps": 624, "total_steps": 1000, "loss": 1.1921, "learning_rate": 3.7227712103210486e-05, "epoch": 1.1093333333333333, "percentage": 62.4, "cur_time": "2024-08-21 06:12:50", "elapsed_time": "0:12:51", "remaining_time": "0:07:44", "throughput": "2140.78", "total_tokens": 1651664}
|
||||
{"current_steps": 627, "total_steps": 1000, "loss": 1.1563, "learning_rate": 3.6722194125659556e-05, "epoch": 1.1146666666666667, "percentage": 62.7, "cur_time": "2024-08-21 06:12:53", "elapsed_time": "0:12:55", "remaining_time": "0:07:41", "throughput": "2139.03", "total_tokens": 1657872}
|
||||
{"current_steps": 630, "total_steps": 1000, "loss": 1.2685, "learning_rate": 3.6218132209150045e-05, "epoch": 1.12, "percentage": 63.0, "cur_time": "2024-08-21 06:12:57", "elapsed_time": "0:12:58", "remaining_time": "0:07:37", "throughput": "2140.03", "total_tokens": 1666688}
|
||||
{"current_steps": 633, "total_steps": 1000, "loss": 1.2926, "learning_rate": 3.5715581629751326e-05, "epoch": 1.1253333333333333, "percentage": 63.3, "cur_time": "2024-08-21 06:13:01", "elapsed_time": "0:13:02", "remaining_time": "0:07:33", "throughput": "2141.36", "total_tokens": 1675840}
|
||||
{"current_steps": 636, "total_steps": 1000, "loss": 1.2786, "learning_rate": 3.5214597497797684e-05, "epoch": 1.1306666666666667, "percentage": 63.6, "cur_time": "2024-08-21 06:13:05", "elapsed_time": "0:13:06", "remaining_time": "0:07:30", "throughput": "2144.37", "total_tokens": 1686816}
|
||||
{"current_steps": 639, "total_steps": 1000, "loss": 1.2671, "learning_rate": 3.471523475184472e-05, "epoch": 1.1360000000000001, "percentage": 63.9, "cur_time": "2024-08-21 06:13:09", "elapsed_time": "0:13:10", "remaining_time": "0:07:26", "throughput": "2145.66", "total_tokens": 1695904}
|
||||
{"current_steps": 642, "total_steps": 1000, "loss": 1.2902, "learning_rate": 3.4217548152644885e-05, "epoch": 1.1413333333333333, "percentage": 64.2, "cur_time": "2024-08-21 06:13:13", "elapsed_time": "0:13:14", "remaining_time": "0:07:22", "throughput": "2145.94", "total_tokens": 1704112}
|
||||
{"current_steps": 645, "total_steps": 1000, "loss": 1.2884, "learning_rate": 3.372159227714218e-05, "epoch": 1.1466666666666667, "percentage": 64.5, "cur_time": "2024-08-21 06:13:16", "elapsed_time": "0:13:17", "remaining_time": "0:07:19", "throughput": "2146.31", "total_tokens": 1712240}
|
||||
{"current_steps": 648, "total_steps": 1000, "loss": 1.2189, "learning_rate": 3.322742151248725e-05, "epoch": 1.152, "percentage": 64.8, "cur_time": "2024-08-21 06:13:20", "elapsed_time": "0:13:21", "remaining_time": "0:07:15", "throughput": "2146.48", "total_tokens": 1720368}
|
||||
{"current_steps": 651, "total_steps": 1000, "loss": 1.2421, "learning_rate": 3.273509005007327e-05, "epoch": 1.1573333333333333, "percentage": 65.1, "cur_time": "2024-08-21 06:13:24", "elapsed_time": "0:13:25", "remaining_time": "0:07:11", "throughput": "2146.66", "total_tokens": 1728752}
|
||||
{"current_steps": 654, "total_steps": 1000, "loss": 1.2702, "learning_rate": 3.224465187959316e-05, "epoch": 1.1626666666666667, "percentage": 65.4, "cur_time": "2024-08-21 06:13:27", "elapsed_time": "0:13:28", "remaining_time": "0:07:07", "throughput": "2144.72", "total_tokens": 1734816}
|
||||
{"current_steps": 657, "total_steps": 1000, "loss": 1.2804, "learning_rate": 3.1756160783119016e-05, "epoch": 1.168, "percentage": 65.7, "cur_time": "2024-08-21 06:13:31", "elapsed_time": "0:13:32", "remaining_time": "0:07:04", "throughput": "2146.39", "total_tokens": 1744720}
|
||||
{"current_steps": 660, "total_steps": 1000, "loss": 1.3908, "learning_rate": 3.12696703292044e-05, "epoch": 1.1733333333333333, "percentage": 66.0, "cur_time": "2024-08-21 06:13:35", "elapsed_time": "0:13:36", "remaining_time": "0:07:00", "throughput": "2145.15", "total_tokens": 1751344}
|
||||
{"current_steps": 663, "total_steps": 1000, "loss": 1.2881, "learning_rate": 3.078523386700982e-05, "epoch": 1.1786666666666668, "percentage": 66.3, "cur_time": "2024-08-21 06:13:38", "elapsed_time": "0:13:39", "remaining_time": "0:06:56", "throughput": "2143.97", "total_tokens": 1757920}
|
||||
{"current_steps": 666, "total_steps": 1000, "loss": 1.322, "learning_rate": 3.0302904520452447e-05, "epoch": 1.184, "percentage": 66.6, "cur_time": "2024-08-21 06:13:42", "elapsed_time": "0:13:43", "remaining_time": "0:06:53", "throughput": "2143.95", "total_tokens": 1765968}
|
||||
{"current_steps": 669, "total_steps": 1000, "loss": 1.1882, "learning_rate": 2.9822735182380496e-05, "epoch": 1.1893333333333334, "percentage": 66.9, "cur_time": "2024-08-21 06:13:46", "elapsed_time": "0:13:47", "remaining_time": "0:06:49", "throughput": "2144.19", "total_tokens": 1774048}
|
||||
{"current_steps": 672, "total_steps": 1000, "loss": 1.2558, "learning_rate": 2.934477850877292e-05, "epoch": 1.1946666666666665, "percentage": 67.2, "cur_time": "2024-08-21 06:13:50", "elapsed_time": "0:13:51", "remaining_time": "0:06:45", "throughput": "2144.33", "total_tokens": 1782208}
|
||||
{"current_steps": 675, "total_steps": 1000, "loss": 1.3037, "learning_rate": 2.886908691296504e-05, "epoch": 1.2, "percentage": 67.5, "cur_time": "2024-08-21 06:13:53", "elapsed_time": "0:13:54", "remaining_time": "0:06:41", "throughput": "2144.20", "total_tokens": 1789760}
|
||||
{"current_steps": 678, "total_steps": 1000, "loss": 1.2251, "learning_rate": 2.8395712559900877e-05, "epoch": 1.2053333333333334, "percentage": 67.8, "cur_time": "2024-08-21 06:13:57", "elapsed_time": "0:13:58", "remaining_time": "0:06:38", "throughput": "2144.79", "total_tokens": 1797856}
|
||||
{"current_steps": 681, "total_steps": 1000, "loss": 1.3007, "learning_rate": 2.7924707360412746e-05, "epoch": 1.2106666666666666, "percentage": 68.1, "cur_time": "2024-08-21 06:14:00", "elapsed_time": "0:14:01", "remaining_time": "0:06:34", "throughput": "2143.32", "total_tokens": 1804208}
|
||||
{"current_steps": 684, "total_steps": 1000, "loss": 1.4622, "learning_rate": 2.7456122965528475e-05, "epoch": 1.216, "percentage": 68.4, "cur_time": "2024-08-21 06:14:04", "elapsed_time": "0:14:05", "remaining_time": "0:06:30", "throughput": "2142.37", "total_tokens": 1811376}
|
||||
{"current_steps": 687, "total_steps": 1000, "loss": 1.2426, "learning_rate": 2.699001076080742e-05, "epoch": 1.2213333333333334, "percentage": 68.7, "cur_time": "2024-08-21 06:14:08", "elapsed_time": "0:14:09", "remaining_time": "0:06:26", "throughput": "2143.99", "total_tokens": 1821040}
|
||||
{"current_steps": 690, "total_steps": 1000, "loss": 1.2246, "learning_rate": 2.6526421860705473e-05, "epoch": 1.2266666666666666, "percentage": 69.0, "cur_time": "2024-08-21 06:14:11", "elapsed_time": "0:14:12", "remaining_time": "0:06:23", "throughput": "2142.46", "total_tokens": 1827120}
|
||||
{"current_steps": 693, "total_steps": 1000, "loss": 1.3284, "learning_rate": 2.6065407102969664e-05, "epoch": 1.232, "percentage": 69.3, "cur_time": "2024-08-21 06:14:15", "elapsed_time": "0:14:16", "remaining_time": "0:06:19", "throughput": "2142.67", "total_tokens": 1835408}
|
||||
{"current_steps": 696, "total_steps": 1000, "loss": 1.2983, "learning_rate": 2.560701704306336e-05, "epoch": 1.2373333333333334, "percentage": 69.6, "cur_time": "2024-08-21 06:14:19", "elapsed_time": "0:14:20", "remaining_time": "0:06:15", "throughput": "2141.89", "total_tokens": 1842288}
|
||||
{"current_steps": 699, "total_steps": 1000, "loss": 1.2629, "learning_rate": 2.5151301948622237e-05, "epoch": 1.2426666666666666, "percentage": 69.9, "cur_time": "2024-08-21 06:14:23", "elapsed_time": "0:14:24", "remaining_time": "0:06:12", "throughput": "2143.75", "total_tokens": 1852576}
|
||||
{"current_steps": 702, "total_steps": 1000, "loss": 1.1546, "learning_rate": 2.469831179394182e-05, "epoch": 1.248, "percentage": 70.2, "cur_time": "2024-08-21 06:14:27", "elapsed_time": "0:14:28", "remaining_time": "0:06:08", "throughput": "2139.76", "total_tokens": 1858368}
|
||||
{"current_steps": 705, "total_steps": 1000, "loss": 1.1971, "learning_rate": 2.4248096254497288e-05, "epoch": 1.2533333333333334, "percentage": 70.5, "cur_time": "2024-08-21 06:14:31", "elapsed_time": "0:14:32", "remaining_time": "0:06:05", "throughput": "2140.94", "total_tokens": 1867648}
|
||||
{"current_steps": 708, "total_steps": 1000, "loss": 1.213, "learning_rate": 2.3800704701496053e-05, "epoch": 1.2586666666666666, "percentage": 70.8, "cur_time": "2024-08-21 06:14:34", "elapsed_time": "0:14:36", "remaining_time": "0:06:01", "throughput": "2140.72", "total_tokens": 1875280}
|
||||
{"current_steps": 711, "total_steps": 1000, "loss": 1.2494, "learning_rate": 2.33561861964635e-05, "epoch": 1.264, "percentage": 71.1, "cur_time": "2024-08-21 06:14:38", "elapsed_time": "0:14:39", "remaining_time": "0:05:57", "throughput": "2141.97", "total_tokens": 1884192}
|
||||
{"current_steps": 714, "total_steps": 1000, "loss": 1.1249, "learning_rate": 2.2914589485863014e-05, "epoch": 1.2693333333333334, "percentage": 71.4, "cur_time": "2024-08-21 06:14:42", "elapsed_time": "0:14:43", "remaining_time": "0:05:53", "throughput": "2142.43", "total_tokens": 1892208}
|
||||
{"current_steps": 717, "total_steps": 1000, "loss": 1.1762, "learning_rate": 2.247596299575022e-05, "epoch": 1.2746666666666666, "percentage": 71.7, "cur_time": "2024-08-21 06:14:45", "elapsed_time": "0:14:46", "remaining_time": "0:05:50", "throughput": "2142.88", "total_tokens": 1900432}
|
||||
{"current_steps": 720, "total_steps": 1000, "loss": 1.3507, "learning_rate": 2.2040354826462668e-05, "epoch": 1.28, "percentage": 72.0, "cur_time": "2024-08-21 06:14:49", "elapsed_time": "0:14:50", "remaining_time": "0:05:46", "throughput": "2142.03", "total_tokens": 1907104}
|
||||
{"current_steps": 723, "total_steps": 1000, "loss": 1.1874, "learning_rate": 2.160781274734495e-05, "epoch": 1.2853333333333334, "percentage": 72.3, "cur_time": "2024-08-21 06:14:53", "elapsed_time": "0:14:54", "remaining_time": "0:05:42", "throughput": "2143.63", "total_tokens": 1917024}
|
||||
{"current_steps": 726, "total_steps": 1000, "loss": 1.3059, "learning_rate": 2.117838419151034e-05, "epoch": 1.2906666666666666, "percentage": 72.6, "cur_time": "2024-08-21 06:14:56", "elapsed_time": "0:14:57", "remaining_time": "0:05:38", "throughput": "2142.93", "total_tokens": 1924096}
|
||||
{"current_steps": 729, "total_steps": 1000, "loss": 1.2108, "learning_rate": 2.0752116250639225e-05, "epoch": 1.296, "percentage": 72.9, "cur_time": "2024-08-21 06:15:00", "elapsed_time": "0:15:01", "remaining_time": "0:05:35", "throughput": "2142.59", "total_tokens": 1931488}
|
||||
{"current_steps": 732, "total_steps": 1000, "loss": 1.3217, "learning_rate": 2.0329055669814934e-05, "epoch": 1.3013333333333335, "percentage": 73.2, "cur_time": "2024-08-21 06:15:04", "elapsed_time": "0:15:05", "remaining_time": "0:05:31", "throughput": "2142.95", "total_tokens": 1939856}
|
||||
{"current_steps": 735, "total_steps": 1000, "loss": 1.0395, "learning_rate": 1.9909248842397584e-05, "epoch": 1.3066666666666666, "percentage": 73.5, "cur_time": "2024-08-21 06:15:07", "elapsed_time": "0:15:08", "remaining_time": "0:05:27", "throughput": "2142.17", "total_tokens": 1946528}
|
||||
{"current_steps": 738, "total_steps": 1000, "loss": 1.1858, "learning_rate": 1.9492741804936622e-05, "epoch": 1.312, "percentage": 73.8, "cur_time": "2024-08-21 06:15:11", "elapsed_time": "0:15:12", "remaining_time": "0:05:23", "throughput": "2141.65", "total_tokens": 1953728}
|
||||
{"current_steps": 741, "total_steps": 1000, "loss": 1.2912, "learning_rate": 1.9079580232122303e-05, "epoch": 1.3173333333333335, "percentage": 74.1, "cur_time": "2024-08-21 06:15:14", "elapsed_time": "0:15:16", "remaining_time": "0:05:20", "throughput": "2141.32", "total_tokens": 1961488}
|
||||
{"current_steps": 744, "total_steps": 1000, "loss": 1.1579, "learning_rate": 1.866980943177699e-05, "epoch": 1.3226666666666667, "percentage": 74.4, "cur_time": "2024-08-21 06:15:18", "elapsed_time": "0:15:19", "remaining_time": "0:05:16", "throughput": "2140.74", "total_tokens": 1968480}
|
||||
{"current_steps": 747, "total_steps": 1000, "loss": 1.1789, "learning_rate": 1.8263474339886628e-05, "epoch": 1.328, "percentage": 74.7, "cur_time": "2024-08-21 06:15:22", "elapsed_time": "0:15:23", "remaining_time": "0:05:12", "throughput": "2142.13", "total_tokens": 1977680}
|
||||
{"current_steps": 750, "total_steps": 1000, "loss": 1.2551, "learning_rate": 1.7860619515673033e-05, "epoch": 1.3333333333333333, "percentage": 75.0, "cur_time": "2024-08-21 06:15:25", "elapsed_time": "0:15:26", "remaining_time": "0:05:08", "throughput": "2141.58", "total_tokens": 1985072}
|
||||
{"current_steps": 753, "total_steps": 1000, "loss": 1.2931, "learning_rate": 1.746128913670746e-05, "epoch": 1.3386666666666667, "percentage": 75.3, "cur_time": "2024-08-21 06:15:29", "elapsed_time": "0:15:30", "remaining_time": "0:05:05", "throughput": "2140.72", "total_tokens": 1991744}
|
||||
{"current_steps": 756, "total_steps": 1000, "loss": 1.2748, "learning_rate": 1.7065526994065973e-05, "epoch": 1.3439999999999999, "percentage": 75.6, "cur_time": "2024-08-21 06:15:32", "elapsed_time": "0:15:34", "remaining_time": "0:05:01", "throughput": "2140.32", "total_tokens": 1999136}
|
||||
{"current_steps": 759, "total_steps": 1000, "loss": 1.2862, "learning_rate": 1.667337648752738e-05, "epoch": 1.3493333333333333, "percentage": 75.9, "cur_time": "2024-08-21 06:15:36", "elapsed_time": "0:15:37", "remaining_time": "0:04:57", "throughput": "2140.41", "total_tokens": 2007056}
|
||||
{"current_steps": 762, "total_steps": 1000, "loss": 1.3582, "learning_rate": 1.6284880620813848e-05, "epoch": 1.3546666666666667, "percentage": 76.2, "cur_time": "2024-08-21 06:15:40", "elapsed_time": "0:15:41", "remaining_time": "0:04:53", "throughput": "2141.10", "total_tokens": 2015360}
|
||||
{"current_steps": 765, "total_steps": 1000, "loss": 1.1798, "learning_rate": 1.5900081996875083e-05, "epoch": 1.3599999999999999, "percentage": 76.5, "cur_time": "2024-08-21 06:15:43", "elapsed_time": "0:15:44", "remaining_time": "0:04:50", "throughput": "2141.62", "total_tokens": 2023600}
|
||||
{"current_steps": 768, "total_steps": 1000, "loss": 1.1198, "learning_rate": 1.551902281321651e-05, "epoch": 1.3653333333333333, "percentage": 76.8, "cur_time": "2024-08-21 06:15:47", "elapsed_time": "0:15:48", "remaining_time": "0:04:46", "throughput": "2141.59", "total_tokens": 2031200}
|
||||
{"current_steps": 771, "total_steps": 1000, "loss": 1.2644, "learning_rate": 1.5141744857271778e-05, "epoch": 1.3706666666666667, "percentage": 77.1, "cur_time": "2024-08-21 06:15:50", "elapsed_time": "0:15:51", "remaining_time": "0:04:42", "throughput": "2141.09", "total_tokens": 2038272}
|
||||
{"current_steps": 774, "total_steps": 1000, "loss": 1.383, "learning_rate": 1.4768289501820265e-05, "epoch": 1.376, "percentage": 77.4, "cur_time": "2024-08-21 06:15:54", "elapsed_time": "0:15:55", "remaining_time": "0:04:39", "throughput": "2140.68", "total_tokens": 2045584}
|
||||
{"current_steps": 777, "total_steps": 1000, "loss": 1.2255, "learning_rate": 1.439869770045018e-05, "epoch": 1.3813333333333333, "percentage": 77.7, "cur_time": "2024-08-21 06:15:58", "elapsed_time": "0:15:59", "remaining_time": "0:04:35", "throughput": "2139.84", "total_tokens": 2052272}
|
||||
{"current_steps": 780, "total_steps": 1000, "loss": 1.3218, "learning_rate": 1.4033009983067452e-05, "epoch": 1.3866666666666667, "percentage": 78.0, "cur_time": "2024-08-21 06:16:01", "elapsed_time": "0:16:02", "remaining_time": "0:04:31", "throughput": "2139.31", "total_tokens": 2059296}
|
||||
{"current_steps": 783, "total_steps": 1000, "loss": 1.2757, "learning_rate": 1.367126645145121e-05, "epoch": 1.392, "percentage": 78.3, "cur_time": "2024-08-21 06:16:05", "elapsed_time": "0:16:06", "remaining_time": "0:04:27", "throughput": "2138.98", "total_tokens": 2066624}
|
||||
{"current_steps": 786, "total_steps": 1000, "loss": 1.3221, "learning_rate": 1.3313506774856177e-05, "epoch": 1.3973333333333333, "percentage": 78.6, "cur_time": "2024-08-21 06:16:08", "elapsed_time": "0:16:09", "remaining_time": "0:04:24", "throughput": "2139.95", "total_tokens": 2075472}
|
||||
{"current_steps": 789, "total_steps": 1000, "loss": 1.3386, "learning_rate": 1.29597701856625e-05, "epoch": 1.4026666666666667, "percentage": 78.9, "cur_time": "2024-08-21 06:16:12", "elapsed_time": "0:16:13", "remaining_time": "0:04:20", "throughput": "2140.72", "total_tokens": 2084000}
|
||||
{"current_steps": 792, "total_steps": 1000, "loss": 1.2699, "learning_rate": 1.2610095475073414e-05, "epoch": 1.408, "percentage": 79.2, "cur_time": "2024-08-21 06:16:16", "elapsed_time": "0:16:17", "remaining_time": "0:04:16", "throughput": "2141.49", "total_tokens": 2092592}
|
||||
{"current_steps": 795, "total_steps": 1000, "loss": 1.0779, "learning_rate": 1.22645209888614e-05, "epoch": 1.4133333333333333, "percentage": 79.5, "cur_time": "2024-08-21 06:16:19", "elapsed_time": "0:16:20", "remaining_time": "0:04:12", "throughput": "2140.75", "total_tokens": 2099456}
|
||||
{"current_steps": 798, "total_steps": 1000, "loss": 1.2973, "learning_rate": 1.1923084623163172e-05, "epoch": 1.4186666666666667, "percentage": 79.8, "cur_time": "2024-08-21 06:16:23", "elapsed_time": "0:16:24", "remaining_time": "0:04:09", "throughput": "2141.03", "total_tokens": 2107648}
|
||||
{"current_steps": 801, "total_steps": 1000, "loss": 1.3005, "learning_rate": 1.1585823820323843e-05, "epoch": 1.424, "percentage": 80.1, "cur_time": "2024-08-21 06:16:27", "elapsed_time": "0:16:28", "remaining_time": "0:04:05", "throughput": "2140.00", "total_tokens": 2114960}
|
||||
{"current_steps": 804, "total_steps": 1000, "loss": 1.1668, "learning_rate": 1.1252775564791024e-05, "epoch": 1.4293333333333333, "percentage": 80.4, "cur_time": "2024-08-21 06:16:30", "elapsed_time": "0:16:32", "remaining_time": "0:04:01", "throughput": "2140.27", "total_tokens": 2123216}
|
||||
{"current_steps": 807, "total_steps": 1000, "loss": 1.2009, "learning_rate": 1.0923976379059058e-05, "epoch": 1.4346666666666668, "percentage": 80.7, "cur_time": "2024-08-21 06:16:34", "elapsed_time": "0:16:35", "remaining_time": "0:03:58", "throughput": "2140.75", "total_tokens": 2131392}
|
||||
{"current_steps": 810, "total_steps": 1000, "loss": 1.103, "learning_rate": 1.0599462319663905e-05, "epoch": 1.44, "percentage": 81.0, "cur_time": "2024-08-21 06:16:38", "elapsed_time": "0:16:39", "remaining_time": "0:03:54", "throughput": "2140.85", "total_tokens": 2139360}
|
||||
{"current_steps": 813, "total_steps": 1000, "loss": 1.2085, "learning_rate": 1.0279268973229089e-05, "epoch": 1.4453333333333334, "percentage": 81.3, "cur_time": "2024-08-21 06:16:42", "elapsed_time": "0:16:43", "remaining_time": "0:03:50", "throughput": "2142.07", "total_tokens": 2148976}
|
||||
{"current_steps": 816, "total_steps": 1000, "loss": 1.2273, "learning_rate": 9.963431452563332e-06, "epoch": 1.4506666666666668, "percentage": 81.6, "cur_time": "2024-08-21 06:16:45", "elapsed_time": "0:16:46", "remaining_time": "0:03:47", "throughput": "2142.33", "total_tokens": 2157024}
|
||||
{"current_steps": 819, "total_steps": 1000, "loss": 1.0912, "learning_rate": 9.651984392809914e-06, "epoch": 1.456, "percentage": 81.9, "cur_time": "2024-08-21 06:16:49", "elapsed_time": "0:16:50", "remaining_time": "0:03:43", "throughput": "2142.71", "total_tokens": 2165456}
|
||||
{"current_steps": 822, "total_steps": 1000, "loss": 1.2239, "learning_rate": 9.344961947648623e-06, "epoch": 1.4613333333333334, "percentage": 82.2, "cur_time": "2024-08-21 06:16:53", "elapsed_time": "0:16:54", "remaining_time": "0:03:39", "throughput": "2142.68", "total_tokens": 2173280}
|
||||
{"current_steps": 825, "total_steps": 1000, "loss": 1.2145, "learning_rate": 9.042397785550405e-06, "epoch": 1.4666666666666668, "percentage": 82.5, "cur_time": "2024-08-21 06:16:56", "elapsed_time": "0:16:57", "remaining_time": "0:03:35", "throughput": "2142.33", "total_tokens": 2180496}
|
||||
{"current_steps": 828, "total_steps": 1000, "loss": 1.3317, "learning_rate": 8.744325086085248e-06, "epoch": 1.472, "percentage": 82.8, "cur_time": "2024-08-21 06:17:00", "elapsed_time": "0:17:01", "remaining_time": "0:03:32", "throughput": "2141.91", "total_tokens": 2187648}
|
||||
{"current_steps": 831, "total_steps": 1000, "loss": 1.244, "learning_rate": 8.450776536283594e-06, "epoch": 1.4773333333333334, "percentage": 83.1, "cur_time": "2024-08-21 06:17:03", "elapsed_time": "0:17:04", "remaining_time": "0:03:28", "throughput": "2141.60", "total_tokens": 2194960}
|
||||
{"current_steps": 834, "total_steps": 1000, "loss": 1.2788, "learning_rate": 8.16178432705192e-06, "epoch": 1.4826666666666668, "percentage": 83.4, "cur_time": "2024-08-21 06:17:07", "elapsed_time": "0:17:08", "remaining_time": "0:03:24", "throughput": "2141.86", "total_tokens": 2203072}
|
||||
{"current_steps": 837, "total_steps": 1000, "loss": 1.1167, "learning_rate": 7.877380149642626e-06, "epoch": 1.488, "percentage": 83.7, "cur_time": "2024-08-21 06:17:11", "elapsed_time": "0:17:12", "remaining_time": "0:03:21", "throughput": "2142.65", "total_tokens": 2211808}
|
||||
{"current_steps": 840, "total_steps": 1000, "loss": 1.4073, "learning_rate": 7.597595192178702e-06, "epoch": 1.4933333333333334, "percentage": 84.0, "cur_time": "2024-08-21 06:17:14", "elapsed_time": "0:17:16", "remaining_time": "0:03:17", "throughput": "2143.42", "total_tokens": 2220656}
|
||||
{"current_steps": 843, "total_steps": 1000, "loss": 1.3246, "learning_rate": 7.322460136233622e-06, "epoch": 1.4986666666666666, "percentage": 84.3, "cur_time": "2024-08-21 06:17:18", "elapsed_time": "0:17:19", "remaining_time": "0:03:13", "throughput": "2143.63", "total_tokens": 2228368}
|
||||
{"current_steps": 846, "total_steps": 1000, "loss": 1.2071, "learning_rate": 7.052005153466779e-06, "epoch": 1.504, "percentage": 84.6, "cur_time": "2024-08-21 06:17:22", "elapsed_time": "0:17:23", "remaining_time": "0:03:09", "throughput": "2143.96", "total_tokens": 2236560}
|
||||
{"current_steps": 849, "total_steps": 1000, "loss": 1.2258, "learning_rate": 6.786259902314768e-06, "epoch": 1.5093333333333332, "percentage": 84.9, "cur_time": "2024-08-21 06:17:25", "elapsed_time": "0:17:26", "remaining_time": "0:03:06", "throughput": "2143.84", "total_tokens": 2244352}
|
||||
{"current_steps": 852, "total_steps": 1000, "loss": 1.3457, "learning_rate": 6.52525352473905e-06, "epoch": 1.5146666666666668, "percentage": 85.2, "cur_time": "2024-08-21 06:17:29", "elapsed_time": "0:17:30", "remaining_time": "0:03:02", "throughput": "2144.37", "total_tokens": 2252976}
|
||||
{"current_steps": 855, "total_steps": 1000, "loss": 1.2233, "learning_rate": 6.269014643030213e-06, "epoch": 1.52, "percentage": 85.5, "cur_time": "2024-08-21 06:17:33", "elapsed_time": "0:17:34", "remaining_time": "0:02:58", "throughput": "2144.39", "total_tokens": 2260800}
|
||||
{"current_steps": 858, "total_steps": 1000, "loss": 1.2691, "learning_rate": 6.017571356669183e-06, "epoch": 1.5253333333333332, "percentage": 85.8, "cur_time": "2024-08-21 06:17:36", "elapsed_time": "0:17:37", "remaining_time": "0:02:55", "throughput": "2145.25", "total_tokens": 2269632}
|
||||
{"current_steps": 861, "total_steps": 1000, "loss": 1.1786, "learning_rate": 5.770951239245803e-06, "epoch": 1.5306666666666666, "percentage": 86.1, "cur_time": "2024-08-21 06:17:40", "elapsed_time": "0:17:41", "remaining_time": "0:02:51", "throughput": "2144.96", "total_tokens": 2276864}
|
||||
{"current_steps": 864, "total_steps": 1000, "loss": 1.1514, "learning_rate": 5.529181335435124e-06, "epoch": 1.536, "percentage": 86.4, "cur_time": "2024-08-21 06:17:43", "elapsed_time": "0:17:45", "remaining_time": "0:02:47", "throughput": "2144.31", "total_tokens": 2283808}
|
||||
{"current_steps": 867, "total_steps": 1000, "loss": 1.245, "learning_rate": 5.292288158031594e-06, "epoch": 1.5413333333333332, "percentage": 86.7, "cur_time": "2024-08-21 06:17:47", "elapsed_time": "0:17:48", "remaining_time": "0:02:43", "throughput": "2143.86", "total_tokens": 2291072}
|
||||
{"current_steps": 870, "total_steps": 1000, "loss": 1.1263, "learning_rate": 5.060297685041659e-06, "epoch": 1.5466666666666666, "percentage": 87.0, "cur_time": "2024-08-21 06:17:51", "elapsed_time": "0:17:52", "remaining_time": "0:02:40", "throughput": "2143.63", "total_tokens": 2298448}
|
||||
{"current_steps": 873, "total_steps": 1000, "loss": 1.2041, "learning_rate": 4.833235356834959e-06, "epoch": 1.552, "percentage": 87.3, "cur_time": "2024-08-21 06:17:54", "elapsed_time": "0:17:55", "remaining_time": "0:02:36", "throughput": "2144.49", "total_tokens": 2307248}
|
||||
{"current_steps": 876, "total_steps": 1000, "loss": 1.1853, "learning_rate": 4.611126073354571e-06, "epoch": 1.5573333333333332, "percentage": 87.6, "cur_time": "2024-08-21 06:17:58", "elapsed_time": "0:17:59", "remaining_time": "0:02:32", "throughput": "2145.08", "total_tokens": 2316192}
|
||||
{"current_steps": 879, "total_steps": 1000, "loss": 1.2726, "learning_rate": 4.3939941913863525e-06, "epoch": 1.5626666666666666, "percentage": 87.9, "cur_time": "2024-08-21 06:18:02", "elapsed_time": "0:18:03", "remaining_time": "0:02:29", "throughput": "2144.25", "total_tokens": 2322704}
|
||||
{"current_steps": 882, "total_steps": 1000, "loss": 1.2391, "learning_rate": 4.181863521888019e-06, "epoch": 1.568, "percentage": 88.2, "cur_time": "2024-08-21 06:18:05", "elapsed_time": "0:18:06", "remaining_time": "0:02:25", "throughput": "2143.80", "total_tokens": 2329600}
|
||||
{"current_steps": 885, "total_steps": 1000, "loss": 1.2163, "learning_rate": 3.974757327377981e-06, "epoch": 1.5733333333333333, "percentage": 88.5, "cur_time": "2024-08-21 06:18:09", "elapsed_time": "0:18:10", "remaining_time": "0:02:21", "throughput": "2143.50", "total_tokens": 2337120}
|
||||
{"current_steps": 888, "total_steps": 1000, "loss": 1.2243, "learning_rate": 3.772698319384349e-06, "epoch": 1.5786666666666667, "percentage": 88.8, "cur_time": "2024-08-21 06:18:12", "elapsed_time": "0:18:13", "remaining_time": "0:02:17", "throughput": "2143.45", "total_tokens": 2344896}
|
||||
{"current_steps": 891, "total_steps": 1000, "loss": 1.3121, "learning_rate": 3.575708655954324e-06, "epoch": 1.584, "percentage": 89.1, "cur_time": "2024-08-21 06:18:16", "elapsed_time": "0:18:17", "remaining_time": "0:02:14", "throughput": "2144.55", "total_tokens": 2354384}
|
||||
{"current_steps": 894, "total_steps": 1000, "loss": 1.1817, "learning_rate": 3.3838099392243916e-06, "epoch": 1.5893333333333333, "percentage": 89.4, "cur_time": "2024-08-21 06:18:20", "elapsed_time": "0:18:21", "remaining_time": "0:02:10", "throughput": "2145.53", "total_tokens": 2363984}
|
||||
{"current_steps": 897, "total_steps": 1000, "loss": 1.2091, "learning_rate": 3.197023213051337e-06, "epoch": 1.5946666666666667, "percentage": 89.7, "cur_time": "2024-08-21 06:18:24", "elapsed_time": "0:18:25", "remaining_time": "0:02:06", "throughput": "2146.32", "total_tokens": 2373008}
|
||||
{"current_steps": 900, "total_steps": 1000, "loss": 1.2872, "learning_rate": 3.0153689607045845e-06, "epoch": 1.6, "percentage": 90.0, "cur_time": "2024-08-21 06:18:28", "elapsed_time": "0:18:29", "remaining_time": "0:02:03", "throughput": "2145.83", "total_tokens": 2379984}
|
||||
{"current_steps": 903, "total_steps": 1000, "loss": 1.2326, "learning_rate": 2.8388671026199522e-06, "epoch": 1.6053333333333333, "percentage": 90.3, "cur_time": "2024-08-21 06:18:31", "elapsed_time": "0:18:33", "remaining_time": "0:01:59", "throughput": "2144.99", "total_tokens": 2387472}
|
||||
{"current_steps": 906, "total_steps": 1000, "loss": 1.2061, "learning_rate": 2.667536994215186e-06, "epoch": 1.6106666666666667, "percentage": 90.6, "cur_time": "2024-08-21 06:18:35", "elapsed_time": "0:18:36", "remaining_time": "0:01:55", "throughput": "2145.10", "total_tokens": 2395504}
|
||||
{"current_steps": 909, "total_steps": 1000, "loss": 1.3237, "learning_rate": 2.501397423767382e-06, "epoch": 1.616, "percentage": 90.9, "cur_time": "2024-08-21 06:18:39", "elapsed_time": "0:18:40", "remaining_time": "0:01:52", "throughput": "2144.73", "total_tokens": 2402752}
|
||||
{"current_steps": 912, "total_steps": 1000, "loss": 1.2587, "learning_rate": 2.340466610352654e-06, "epoch": 1.6213333333333333, "percentage": 91.2, "cur_time": "2024-08-21 06:18:42", "elapsed_time": "0:18:43", "remaining_time": "0:01:48", "throughput": "2145.14", "total_tokens": 2411056}
|
||||
{"current_steps": 915, "total_steps": 1000, "loss": 1.2855, "learning_rate": 2.1847622018482283e-06, "epoch": 1.6266666666666667, "percentage": 91.5, "cur_time": "2024-08-21 06:18:46", "elapsed_time": "0:18:47", "remaining_time": "0:01:44", "throughput": "2145.39", "total_tokens": 2419232}
|
||||
{"current_steps": 918, "total_steps": 1000, "loss": 1.3, "learning_rate": 2.0343012729971243e-06, "epoch": 1.6320000000000001, "percentage": 91.8, "cur_time": "2024-08-21 06:18:50", "elapsed_time": "0:18:51", "remaining_time": "0:01:41", "throughput": "2145.24", "total_tokens": 2426768}
|
||||
{"current_steps": 921, "total_steps": 1000, "loss": 1.257, "learning_rate": 1.8891003235357308e-06, "epoch": 1.6373333333333333, "percentage": 92.1, "cur_time": "2024-08-21 06:18:53", "elapsed_time": "0:18:54", "remaining_time": "0:01:37", "throughput": "2145.00", "total_tokens": 2434160}
|
||||
{"current_steps": 924, "total_steps": 1000, "loss": 1.2312, "learning_rate": 1.7491752763844293e-06, "epoch": 1.6426666666666667, "percentage": 92.4, "cur_time": "2024-08-21 06:18:57", "elapsed_time": "0:18:58", "remaining_time": "0:01:33", "throughput": "2145.47", "total_tokens": 2442512}
|
||||
{"current_steps": 927, "total_steps": 1000, "loss": 1.1204, "learning_rate": 1.6145414759014431e-06, "epoch": 1.6480000000000001, "percentage": 92.7, "cur_time": "2024-08-21 06:19:01", "elapsed_time": "0:19:02", "remaining_time": "0:01:29", "throughput": "2145.58", "total_tokens": 2450480}
|
||||
{"current_steps": 930, "total_steps": 1000, "loss": 1.2204, "learning_rate": 1.4852136862001764e-06, "epoch": 1.6533333333333333, "percentage": 93.0, "cur_time": "2024-08-21 06:19:04", "elapsed_time": "0:19:05", "remaining_time": "0:01:26", "throughput": "2146.15", "total_tokens": 2459168}
|
||||
{"current_steps": 933, "total_steps": 1000, "loss": 1.2004, "learning_rate": 1.3612060895301759e-06, "epoch": 1.6586666666666665, "percentage": 93.3, "cur_time": "2024-08-21 06:19:08", "elapsed_time": "0:19:09", "remaining_time": "0:01:22", "throughput": "2145.49", "total_tokens": 2466032}
|
||||
{"current_steps": 936, "total_steps": 1000, "loss": 1.3066, "learning_rate": 1.2425322847218368e-06, "epoch": 1.6640000000000001, "percentage": 93.6, "cur_time": "2024-08-21 06:19:12", "elapsed_time": "0:19:13", "remaining_time": "0:01:18", "throughput": "2145.85", "total_tokens": 2474352}
|
||||
{"current_steps": 939, "total_steps": 1000, "loss": 1.1302, "learning_rate": 1.1292052856952062e-06, "epoch": 1.6693333333333333, "percentage": 93.9, "cur_time": "2024-08-21 06:19:15", "elapsed_time": "0:19:16", "remaining_time": "0:01:15", "throughput": "2145.56", "total_tokens": 2481792}
|
||||
{"current_steps": 942, "total_steps": 1000, "loss": 1.0001, "learning_rate": 1.0212375200327973e-06, "epoch": 1.6746666666666665, "percentage": 94.2, "cur_time": "2024-08-21 06:19:19", "elapsed_time": "0:19:20", "remaining_time": "0:01:11", "throughput": "2145.55", "total_tokens": 2489424}
|
||||
{"current_steps": 945, "total_steps": 1000, "loss": 1.2162, "learning_rate": 9.186408276168013e-07, "epoch": 1.6800000000000002, "percentage": 94.5, "cur_time": "2024-08-21 06:19:22", "elapsed_time": "0:19:23", "remaining_time": "0:01:07", "throughput": "2145.96", "total_tokens": 2497888}
|
||||
{"current_steps": 948, "total_steps": 1000, "loss": 1.3685, "learning_rate": 8.214264593307098e-07, "epoch": 1.6853333333333333, "percentage": 94.8, "cur_time": "2024-08-21 06:19:26", "elapsed_time": "0:19:27", "remaining_time": "0:01:04", "throughput": "2145.39", "total_tokens": 2504592}
|
||||
{"current_steps": 951, "total_steps": 1000, "loss": 1.3391, "learning_rate": 7.296050758254957e-07, "epoch": 1.6906666666666665, "percentage": 95.1, "cur_time": "2024-08-21 06:19:29", "elapsed_time": "0:19:31", "remaining_time": "0:01:00", "throughput": "2145.56", "total_tokens": 2512480}
|
||||
{"current_steps": 954, "total_steps": 1000, "loss": 1.2548, "learning_rate": 6.431867463506048e-07, "epoch": 1.696, "percentage": 95.4, "cur_time": "2024-08-21 06:19:33", "elapsed_time": "0:19:34", "remaining_time": "0:00:56", "throughput": "2145.17", "total_tokens": 2519504}
|
||||
{"current_steps": 957, "total_steps": 1000, "loss": 1.3917, "learning_rate": 5.621809476497098e-07, "epoch": 1.7013333333333334, "percentage": 95.7, "cur_time": "2024-08-21 06:19:37", "elapsed_time": "0:19:38", "remaining_time": "0:00:52", "throughput": "2145.30", "total_tokens": 2527360}
|
||||
{"current_steps": 960, "total_steps": 1000, "loss": 1.1987, "learning_rate": 4.865965629214819e-07, "epoch": 1.7066666666666666, "percentage": 96.0, "cur_time": "2024-08-21 06:19:40", "elapsed_time": "0:19:41", "remaining_time": "0:00:49", "throughput": "2144.38", "total_tokens": 2533856}
|
||||
{"current_steps": 963, "total_steps": 1000, "loss": 1.4638, "learning_rate": 4.1644188084548063e-07, "epoch": 1.712, "percentage": 96.3, "cur_time": "2024-08-21 06:19:44", "elapsed_time": "0:19:45", "remaining_time": "0:00:45", "throughput": "2144.10", "total_tokens": 2541616}
|
||||
{"current_steps": 966, "total_steps": 1000, "loss": 1.1836, "learning_rate": 3.517245946731529e-07, "epoch": 1.7173333333333334, "percentage": 96.6, "cur_time": "2024-08-21 06:19:47", "elapsed_time": "0:19:48", "remaining_time": "0:00:41", "throughput": "2143.98", "total_tokens": 2549136}
|
||||
{"current_steps": 969, "total_steps": 1000, "loss": 1.1814, "learning_rate": 2.924518013842303e-07, "epoch": 1.7226666666666666, "percentage": 96.9, "cur_time": "2024-08-21 06:19:51", "elapsed_time": "0:19:52", "remaining_time": "0:00:38", "throughput": "2144.41", "total_tokens": 2557584}
|
||||
{"current_steps": 972, "total_steps": 1000, "loss": 1.2356, "learning_rate": 2.386300009084408e-07, "epoch": 1.728, "percentage": 97.2, "cur_time": "2024-08-21 06:19:55", "elapsed_time": "0:19:56", "remaining_time": "0:00:34", "throughput": "2145.46", "total_tokens": 2566880}
|
||||
{"current_steps": 975, "total_steps": 1000, "loss": 1.1505, "learning_rate": 1.9026509541272275e-07, "epoch": 1.7333333333333334, "percentage": 97.5, "cur_time": "2024-08-21 06:19:59", "elapsed_time": "0:20:00", "remaining_time": "0:00:30", "throughput": "2145.98", "total_tokens": 2575552}
|
||||
{"current_steps": 978, "total_steps": 1000, "loss": 1.1444, "learning_rate": 1.4736238865398765e-07, "epoch": 1.7386666666666666, "percentage": 97.8, "cur_time": "2024-08-21 06:20:02", "elapsed_time": "0:20:03", "remaining_time": "0:00:27", "throughput": "2146.21", "total_tokens": 2583600}
|
||||
{"current_steps": 981, "total_steps": 1000, "loss": 1.3877, "learning_rate": 1.0992658539750178e-07, "epoch": 1.744, "percentage": 98.1, "cur_time": "2024-08-21 06:20:06", "elapsed_time": "0:20:07", "remaining_time": "0:00:23", "throughput": "2146.15", "total_tokens": 2591536}
|
||||
{"current_steps": 984, "total_steps": 1000, "loss": 1.3222, "learning_rate": 7.796179090094891e-08, "epoch": 1.7493333333333334, "percentage": 98.4, "cur_time": "2024-08-21 06:20:10", "elapsed_time": "0:20:11", "remaining_time": "0:00:19", "throughput": "2146.65", "total_tokens": 2599920}
|
||||
{"current_steps": 987, "total_steps": 1000, "loss": 1.1343, "learning_rate": 5.1471510464268236e-08, "epoch": 1.7546666666666666, "percentage": 98.7, "cur_time": "2024-08-21 06:20:13", "elapsed_time": "0:20:14", "remaining_time": "0:00:15", "throughput": "2146.51", "total_tokens": 2607344}
|
||||
{"current_steps": 990, "total_steps": 1000, "loss": 1.1924, "learning_rate": 3.04586490452119e-08, "epoch": 1.76, "percentage": 99.0, "cur_time": "2024-08-21 06:20:17", "elapsed_time": "0:20:18", "remaining_time": "0:00:12", "throughput": "2146.78", "total_tokens": 2615792}
|
||||
{"current_steps": 993, "total_steps": 1000, "loss": 1.2759, "learning_rate": 1.4925510940844156e-08, "epoch": 1.7653333333333334, "percentage": 99.3, "cur_time": "2024-08-21 06:20:20", "elapsed_time": "0:20:22", "remaining_time": "0:00:08", "throughput": "2147.12", "total_tokens": 2623936}
|
||||
{"current_steps": 996, "total_steps": 1000, "loss": 1.2298, "learning_rate": 4.873799534788059e-09, "epoch": 1.7706666666666666, "percentage": 99.6, "cur_time": "2024-08-21 06:20:24", "elapsed_time": "0:20:25", "remaining_time": "0:00:04", "throughput": "2146.94", "total_tokens": 2631440}
|
||||
{"current_steps": 999, "total_steps": 1000, "loss": 1.1737, "learning_rate": 3.0461711048035415e-10, "epoch": 1.776, "percentage": 99.9, "cur_time": "2024-08-21 06:20:28", "elapsed_time": "0:20:29", "remaining_time": "0:00:01", "throughput": "2146.46", "total_tokens": 2638432}
|
||||
{"current_steps": 1000, "total_steps": 1000, "eval_loss": 1.2747279405593872, "epoch": 1.7777777777777777, "percentage": 100.0, "cur_time": "2024-08-21 06:20:50", "elapsed_time": "0:20:51", "remaining_time": "0:00:00", "throughput": "2110.10", "total_tokens": 2640912}
|
||||
{"current_steps": 1000, "total_steps": 1000, "epoch": 1.7777777777777777, "percentage": 100.0, "cur_time": "2024-08-21 06:20:50", "elapsed_time": "0:20:51", "remaining_time": "0:00:00", "throughput": "2109.57", "total_tokens": 2640912}
|
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
After Width: | Height: | Size: 40 KiB |
Binary file not shown.
After Width: | Height: | Size: 58 KiB |
Loading…
Reference in New Issue