TinyPixel committed
Commit e28ce5d · 1 Parent(s): 88c1962

Upload folder using huggingface_hub

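The commit message says the checkpoint folder was pushed with huggingface_hub. A minimal sketch of how such an upload is typically issued, assuming a hypothetical local folder and repo id (neither is given in this commit):

from huggingface_hub import HfApi

api = HfApi()  # uses the locally saved access token
api.upload_folder(
    folder_path="./outputs",              # hypothetical local checkpoint directory
    repo_id="TinyPixel/adapter-example",  # hypothetical repo id, not taken from this commit
    commit_message="Upload folder using huggingface_hub",
)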
adapter_config.json CHANGED
@@ -19,9 +19,9 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "up_proj",
     "gate_proj",
-    "down_proj",
-    "up_proj"
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
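The only change to adapter_config.json is the ordering of target_modules; the same three projection layers are adapted either way. A rough sketch of the kind of PEFT configuration that serializes to this file, using only the fields visible in the hunk (rank, alpha, and the base model are not shown and are left at defaults here):

from peft import LoraConfig

config = LoraConfig(
    target_modules=["up_proj", "gate_proj", "down_proj"],  # order as in the new revision
    task_type="CAUSAL_LM",
)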
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58d114eb040f54e2692a420917bec596c0695c0cd31485a0288b5e726b0ef589
+oid sha256:c11e80c2626b7e725c36b5d59e4313d238413de8eebfdb5b1d6b6e28ab8c714b
 size 113271504
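Only the oid changed for the adapter weights; the size is identical, which is consistent with the same tensor layout holding new values. The oid in an LFS pointer is the SHA-256 of the actual file, so a downloaded copy can be checked against it; a small sketch, assuming the file sits in the current directory:

import hashlib

with open("adapter_model.safetensors", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest)  # should match the oid in the pointer above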
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b645afabb74c2cf7ba011308647c11b78c30d0838c0a9c13339b1243b7030104
+oid sha256:87d135c01136a11bcef2ed383e09bc32d52fff97352f14798fe440aa0238e742
 size 226609018
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a8a82e80daa12ff099e617cf99473e3bb80028e142245ed1e3d1d876dcb1535
+oid sha256:b3698f87a076ace386d58713681a24c766465261225a60a74e4631003e3b501c
 size 14244
trainer_state.json CHANGED
@@ -11,145 +11,145 @@
     {
       "epoch": 0.04,
       "learning_rate": 2e-05,
-      "loss": 1.6262,
+      "loss": 1.6235,
       "step": 2
     },
     {
       "epoch": 0.08,
       "learning_rate": 2e-05,
-      "loss": 1.8704,
+      "loss": 1.878,
       "step": 4
     },
     {
       "epoch": 0.12,
       "learning_rate": 2e-05,
-      "loss": 1.8716,
+      "loss": 1.8871,
       "step": 6
     },
     {
       "epoch": 0.16,
       "learning_rate": 2e-05,
-      "loss": 1.977,
+      "loss": 1.9954,
       "step": 8
     },
     {
       "epoch": 0.21,
       "learning_rate": 2e-05,
-      "loss": 2.0637,
+      "loss": 2.0806,
       "step": 10
     },
     {
       "epoch": 0.25,
       "learning_rate": 2e-05,
-      "loss": 2.0112,
+      "loss": 2.0435,
       "step": 12
     },
     {
       "epoch": 0.29,
       "learning_rate": 2e-05,
-      "loss": 1.6794,
+      "loss": 1.6826,
       "step": 14
     },
     {
       "epoch": 0.33,
       "learning_rate": 2e-05,
-      "loss": 1.5713,
+      "loss": 1.5779,
       "step": 16
     },
     {
       "epoch": 0.37,
       "learning_rate": 2e-05,
-      "loss": 1.8947,
+      "loss": 1.9058,
       "step": 18
     },
     {
       "epoch": 0.41,
       "learning_rate": 2e-05,
-      "loss": 1.6961,
+      "loss": 1.6972,
       "step": 20
     },
     {
       "epoch": 0.45,
       "learning_rate": 2e-05,
-      "loss": 1.9329,
+      "loss": 1.9398,
       "step": 22
     },
     {
       "epoch": 0.49,
       "learning_rate": 2e-05,
-      "loss": 1.9001,
+      "loss": 1.9236,
       "step": 24
     },
     {
       "epoch": 0.53,
       "learning_rate": 2e-05,
-      "loss": 1.6055,
+      "loss": 1.6062,
       "step": 26
     },
     {
       "epoch": 0.57,
       "learning_rate": 2e-05,
-      "loss": 1.7543,
+      "loss": 1.7559,
       "step": 28
     },
     {
       "epoch": 0.62,
       "learning_rate": 2e-05,
-      "loss": 1.9416,
+      "loss": 1.9474,
       "step": 30
     },
     {
       "epoch": 0.66,
       "learning_rate": 2e-05,
-      "loss": 1.7657,
+      "loss": 1.7649,
       "step": 32
     },
     {
       "epoch": 0.7,
       "learning_rate": 2e-05,
-      "loss": 1.7753,
+      "loss": 1.7762,
       "step": 34
     },
     {
       "epoch": 0.74,
       "learning_rate": 2e-05,
-      "loss": 1.9847,
+      "loss": 2.011,
       "step": 36
     },
     {
       "epoch": 0.78,
       "learning_rate": 2e-05,
-      "loss": 1.6714,
+      "loss": 1.6693,
       "step": 38
     },
     {
       "epoch": 0.82,
       "learning_rate": 2e-05,
-      "loss": 1.853,
+      "loss": 1.8561,
       "step": 40
     },
     {
       "epoch": 0.86,
       "learning_rate": 2e-05,
-      "loss": 1.9394,
+      "loss": 1.9452,
       "step": 42
     },
     {
       "epoch": 0.9,
       "learning_rate": 2e-05,
-      "loss": 1.9669,
+      "loss": 1.9725,
       "step": 44
     },
     {
       "epoch": 0.94,
       "learning_rate": 2e-05,
-      "loss": 1.8408,
+      "loss": 1.8454,
       "step": 46
     },
     {
       "epoch": 0.98,
       "learning_rate": 2e-05,
-      "loss": 1.8702,
+      "loss": 1.8836,
       "step": 48
     }
   ],
@@ -158,7 +158,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 500,
-  "total_flos": 1.931399969882112e+16,
+  "total_flos": 1.901574003725107e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:09cc70b30444e968e7edb0b6e89583eb33e514fc6a008179a6727031b56aed62
+oid sha256:1142229c66bf3f832461258a15f379deffb2b5691470e753d093d27a4adb154c
 size 4728