joelniklaus committed
Commit 32d7848
1 parent: 71ea8b7

Training in progress, step 450000

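The commit message above is the automatic checkpoint push that `transformers.Trainer` makes when Hub pushing is enabled; every file below is either a Git LFS pointer or `trainer_state.json`, refreshed at step 450,000. As a hypothetical sketch only (the repo's actual training script and arguments are not part of this commit), arguments along these lines would reproduce the cadence visible here:

```python
# Hypothetical sketch: the real training script/arguments are not in this commit.
# It only mirrors what the diff itself shows (max_steps=1_000_000, a checkpoint
# every 50_000 steps, pushed to the Hub as "Training in progress, step N").
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="output",     # assumed output directory
    max_steps=1_000_000,     # matches "max_steps" in trainer_state.json below
    save_strategy="steps",
    save_steps=50_000,       # consistent with the 400k -> 450k checkpoint jump
    push_to_hub=True,        # Trainer then pushes each checkpoint as a commit like this one
)
print(args.max_steps, args.save_steps)
```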
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:df30fe6bd68fca40ee22ea8ec37ae156b39effe17da0e8c6bd90f869f1186d37
+ oid sha256:a47c6702ccdd35d7295efcf2fba6b573475079a73262f18340961ae7dc92e1b0
 size 1475917081
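Each `CHANGED` block above and below is a Git LFS pointer: the `oid sha256:` line is the SHA-256 of the actual binary and `size` is its byte count, so a new checkpoint shows up as a one-line hash change. A small sketch of checking a downloaded copy against the pointer (the local path is an assumption):

```python
# Sketch: verify a downloaded artifact against the SHA-256 object ID and size
# stored in its Git LFS pointer ("oid sha256:<hex>" / "size <bytes>" above).
# The local path is an assumption; point it at wherever the file was downloaded.
import hashlib
import os

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected_oid = "a47c6702ccdd35d7295efcf2fba6b573475079a73262f18340961ae7dc92e1b0"
expected_size = 1475917081
path = "last-checkpoint/optimizer.pt"  # assumed local path

if os.path.exists(path):
    ok = sha256_of_file(path) == expected_oid and os.path.getsize(path) == expected_size
    print("optimizer.pt matches its LFS pointer" if ok else "mismatch: re-download the file")
```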
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:212e05f8e854456a203541ab3ff307559133e87b152fda8eac6ef2b267ce568c
+ oid sha256:c7c6033185db71598425248ac6246a6b679c43326a22e6612d904f287b44056a
 size 737971755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4e2fa1a5b18a74204480f2aa3043d9976bc826b4d2feee067fc7c05a5c355b7a
+ oid sha256:abd5b5efb14f7d008d77dcc855d7980fade0c762c06d0b13cb3eb0407c77b32e
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:3e4ecef8b58c710458716a0153f8519567dd2a15c4728bc445f0af4d3fb15782
+ oid sha256:78e735efa7e40e0dd22dcac5cb3724b0cbe120563d603ea4b62f22b0f40fc602
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.4,
- "global_step": 400000,
+ "epoch": 0.45,
+ "global_step": 450000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2470,11 +2470,319 @@
  "eval_samples_per_second": 51.638,
  "eval_steps_per_second": 0.413,
  "step": 400000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.993324133116726e-05,
+ "loss": 0.8636,
+ "step": 401000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.978149344295242e-05,
+ "loss": 0.8602,
+ "step": 402000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.962952922749457e-05,
+ "loss": 0.8711,
+ "step": 403000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 6.947735034665002e-05,
+ "loss": 0.8506,
+ "step": 404000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.932495846462261e-05,
+ "loss": 0.8464,
+ "step": 405000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.917235524794558e-05,
+ "loss": 0.8657,
+ "step": 406000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.901954236546323e-05,
+ "loss": 0.8559,
+ "step": 407000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.886652148831279e-05,
+ "loss": 0.8661,
+ "step": 408000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.871329428990602e-05,
+ "loss": 0.8882,
+ "step": 409000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.855986244591104e-05,
+ "loss": 0.902,
+ "step": 410000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.840622763423391e-05,
+ "loss": 0.8794,
+ "step": 411000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.825239153500029e-05,
+ "loss": 0.8736,
+ "step": 412000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.809835583053715e-05,
+ "loss": 0.8778,
+ "step": 413000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.794412220535426e-05,
+ "loss": 0.8906,
+ "step": 414000
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 6.778969234612584e-05,
+ "loss": 0.8881,
+ "step": 415000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.763506794167208e-05,
+ "loss": 0.8966,
+ "step": 416000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.748025068294067e-05,
+ "loss": 0.879,
+ "step": 417000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.732524226298841e-05,
+ "loss": 0.8931,
+ "step": 418000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.71700443769625e-05,
+ "loss": 0.8737,
+ "step": 419000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.701465872208216e-05,
+ "loss": 0.8801,
+ "step": 420000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.685908699762002e-05,
+ "loss": 0.8759,
+ "step": 421000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.670333090488356e-05,
+ "loss": 0.8875,
+ "step": 422000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.654739214719641e-05,
+ "loss": 0.8751,
+ "step": 423000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.639127242987988e-05,
+ "loss": 0.8667,
+ "step": 424000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 6.623497346023418e-05,
+ "loss": 0.865,
+ "step": 425000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.607849694751977e-05,
+ "loss": 0.8851,
+ "step": 426000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.592184460293877e-05,
+ "loss": 0.9006,
+ "step": 427000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.576501813961609e-05,
+ "loss": 0.9099,
+ "step": 428000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.56080192725808e-05,
+ "loss": 0.8892,
+ "step": 429000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.545084971874738e-05,
+ "loss": 0.8928,
+ "step": 430000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.529351119689688e-05,
+ "loss": 0.9009,
+ "step": 431000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.513600542765817e-05,
+ "loss": 0.9041,
+ "step": 432000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.497833413348909e-05,
+ "loss": 0.896,
+ "step": 433000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.48204990386577e-05,
+ "loss": 0.8931,
+ "step": 434000
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 6.466250186922325e-05,
+ "loss": 0.878,
+ "step": 435000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.450434435301751e-05,
+ "loss": 0.855,
+ "step": 436000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.43460282196257e-05,
+ "loss": 0.8781,
+ "step": 437000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.418755520036775e-05,
+ "loss": 0.844,
+ "step": 438000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.402892702827916e-05,
+ "loss": 0.8598,
+ "step": 439000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.387014543809223e-05,
+ "loss": 0.8421,
+ "step": 440000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.371121216621698e-05,
+ "loss": 0.8439,
+ "step": 441000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.355212895072223e-05,
+ "loss": 0.8696,
+ "step": 442000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.339289753131649e-05,
+ "loss": 0.8341,
+ "step": 443000
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 6.323351964932908e-05,
+ "loss": 0.8496,
+ "step": 444000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.307399704769099e-05,
+ "loss": 0.8357,
+ "step": 445000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.291433147091583e-05,
+ "loss": 0.8524,
+ "step": 446000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.275452466508077e-05,
+ "loss": 0.842,
+ "step": 447000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.259457837780742e-05,
+ "loss": 0.827,
+ "step": 448000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.243449435824276e-05,
+ "loss": 0.853,
+ "step": 449000
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 6.227427435703997e-05,
+ "loss": 0.8136,
+ "step": 450000
+ },
+ {
+ "epoch": 0.45,
+ "eval_loss": 0.6146513223648071,
+ "eval_runtime": 103.5315,
+ "eval_samples_per_second": 48.294,
+ "eval_steps_per_second": 0.386,
+ "step": 450000
  }
 ],
 "max_steps": 1000000,
 "num_train_epochs": 9223372036854775807,
- "total_flos": 6.7457091895296e+18,
+ "total_flos": 7.5889228382208e+18,
 "trial_name": null,
 "trial_params": null
 }
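The `trainer_state.json` diff above appends fifty training-log entries (steps 401,000 to 450,000, with the loss hovering between roughly 0.81 and 0.91 and the learning rate decaying from about 6.99e-05 to 6.23e-05) plus one evaluation entry with `eval_loss` 0.6147. A minimal sketch for inspecting those entries locally; it assumes the checkpoint directory has been downloaded and that the entries live under Trainer's usual `log_history` key, which sits outside the visible hunk:

```python
# Minimal sketch, assuming last-checkpoint/ was downloaded locally and that the
# log entries shown in the diff live under the usual Trainer key "log_history"
# (the key name itself is outside the visible hunk).
import json
import os

path = "last-checkpoint/trainer_state.json"  # assumed local path
if os.path.exists(path):
    with open(path) as f:
        state = json.load(f)
    print(f"global_step={state['global_step']}, epoch={state['epoch']}")
    train_logs = [e for e in state["log_history"] if "loss" in e]       # training entries
    eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # evaluation entries
    for entry in train_logs[-3:]:
        print(entry["step"], entry["loss"], entry["learning_rate"])
    if eval_logs:
        last = eval_logs[-1]
        print("latest eval_loss:", last["eval_loss"], "at step", last["step"])
```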
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:212e05f8e854456a203541ab3ff307559133e87b152fda8eac6ef2b267ce568c
+ oid sha256:c7c6033185db71598425248ac6246a6b679c43326a22e6612d904f287b44056a
 size 737971755
runs/Jan25_00-37-02_t1v-n-9f780742-w-0/events.out.tfevents.1674607228.t1v-n-9f780742-w-0.3357200.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:a4c0bb664dfae777337994328c5a785438c146f717652b2c916c004acc03b050
- size 20314
+ oid sha256:6bc8ef4a5bc22172e3dabbe0597803ba02887de7a0308d727abd6aee1c6d9919
+ size 28590
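The TensorBoard event file grew from 20,314 to 28,590 bytes as new scalars were appended. If the run directory named above is downloaded, the log can be read with TensorBoard's `EventAccumulator`; a small sketch (the scalar tag names themselves are not visible in this diff):

```python
# Sketch: reading the updated TensorBoard event file with the tensorboard package.
# The run directory matches the path in this commit and must exist locally.
import os
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/Jan25_00-37-02_t1v-n-9f780742-w-0"
if os.path.isdir(run_dir):
    acc = EventAccumulator(run_dir)
    acc.Reload()                                   # parse the tfevents file(s)
    print("scalar tags:", acc.Tags().get("scalars", []))
    # acc.Scalars("<tag>") would return (wall_time, step, value) events for a tag;
    # which tags this run logged is not shown in the diff.
```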