besimray committed
Commit df7f5a4 · verified · 1 Parent(s): c2b76f7

Training in progress, step 140, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1f94ce60ad18907bfe378be4ba63c3cb07211d25772e7578153e59c360d0334
+oid sha256:8fdc91dddc9a166a95b38156dbe0a1400b141fa6a01a22b79fcbf7762105f13a
 size 90207248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af22c1f6025a1e35c54cfaffb9aa264061cac162f2b52caade7115ff260d713e
+oid sha256:298c35aeb88b8e142a8b2e9ae3b30b40502512f283bdbad9131877cf177376cb
 size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad4c4d2d769c6f52183fdfe62140ef02b36aa1e936b1d8050f51672d3d58fb1e
+oid sha256:43a84554481cf5ef7909ff3c93940e7b945a013d30bf7c3f575e7278bcae704e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3511d75105f53c278279e3dade6f856082c8693b0424c0bf567bdcf23028dd2b
+oid sha256:c049ad9892b8ae242eb26f06a6af3edec6b865f6613ddc97103e21f4231f6420
 size 1064
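
The four files above are stored through Git LFS, so each diff only touches the three-line pointer: the sha256 oid changes between step 130 and step 140 while the byte size stays the same (the adapter weights, optimizer state, RNG state, and scheduler state keep fixed sizes from one checkpoint to the next). A minimal sketch of checking a local file against the oid recorded in the new pointer, assuming the LFS objects are checked out; the helper name is illustrative and not part of the commit:

# Sketch: verify that a checkpoint file matches the sha256 oid in its Git LFS pointer.
import hashlib

def sha256_of_file(path, chunk_size=1 << 20):
    """Stream the file so large checkpoints (e.g. optimizer.pt) fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new adapter_model.safetensors pointer above
expected = "8fdc91dddc9a166a95b38156dbe0a1400b141fa6a01a22b79fcbf7762105f13a"
actual = sha256_of_file("last-checkpoint/adapter_model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")
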
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 1.5968632698059082,
   "best_model_checkpoint": "miner_id_24/checkpoint-130",
-  "epoch": 0.03671020120014119,
+  "epoch": 0.039534062830921285,
   "eval_steps": 10,
-  "global_step": 130,
+  "global_step": 140,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1029,6 +1029,84 @@
       "eval_samples_per_second": 5.599,
       "eval_steps_per_second": 5.599,
       "step": 130
+    },
+    {
+      "epoch": 0.036992587363219205,
+      "grad_norm": 0.9281368851661682,
+      "learning_rate": 0.0001713874307839863,
+      "loss": 1.8154,
+      "step": 131
+    },
+    {
+      "epoch": 0.03727497352629721,
+      "grad_norm": 2.6511611938476562,
+      "learning_rate": 0.0001709369921005258,
+      "loss": 1.4,
+      "step": 132
+    },
+    {
+      "epoch": 0.03755735968937522,
+      "grad_norm": 1.9646121263504028,
+      "learning_rate": 0.00017048363747516117,
+      "loss": 1.4126,
+      "step": 133
+    },
+    {
+      "epoch": 0.03783974585245323,
+      "grad_norm": 1.7208032608032227,
+      "learning_rate": 0.00017002738554352552,
+      "loss": 0.5568,
+      "step": 134
+    },
+    {
+      "epoch": 0.03812213201553124,
+      "grad_norm": 2.9022722244262695,
+      "learning_rate": 0.00016956825506034867,
+      "loss": 1.6914,
+      "step": 135
+    },
+    {
+      "epoch": 0.03840451817860925,
+      "grad_norm": 1.368131160736084,
+      "learning_rate": 0.00016910626489868649,
+      "loss": 1.5647,
+      "step": 136
+    },
+    {
+      "epoch": 0.038686904341687256,
+      "grad_norm": 1.5058932304382324,
+      "learning_rate": 0.00016864143404914504,
+      "loss": 2.4011,
+      "step": 137
+    },
+    {
+      "epoch": 0.03896929050476527,
+      "grad_norm": 2.3039586544036865,
+      "learning_rate": 0.00016817378161909996,
+      "loss": 0.9973,
+      "step": 138
+    },
+    {
+      "epoch": 0.03925167666784327,
+      "grad_norm": 1.9210929870605469,
+      "learning_rate": 0.00016770332683191096,
+      "loss": 1.7679,
+      "step": 139
+    },
+    {
+      "epoch": 0.039534062830921285,
+      "grad_norm": 1.3414863348007202,
+      "learning_rate": 0.0001672300890261317,
+      "loss": 1.4788,
+      "step": 140
+    },
+    {
+      "epoch": 0.039534062830921285,
+      "eval_loss": 1.6231486797332764,
+      "eval_runtime": 133.2243,
+      "eval_samples_per_second": 5.6,
+      "eval_steps_per_second": 5.6,
+      "step": 140
     }
   ],
   "logging_steps": 1,
@@ -1043,7 +1121,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
       }
     },
     "TrainerControl": {
@@ -1057,7 +1135,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.272444660744192e+16,
+  "total_flos": 1.370325019262976e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null