TinyPixel committed · Commit 180d6ba (verified) · 1 parent: 442f5ed

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -19,9 +19,9 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
+    "gate_proj",
     "up_proj",
-    "gate_proj"
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
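The only change here is the order of the entries in "target_modules"; the same three MLP projections (gate_proj, up_proj, down_proj) are adapted before and after, and the list order does not affect which modules receive LoRA adapters. A minimal sketch of how such a config is typically produced with the peft library; the rank and alpha values below are placeholders, not values read from this commit:

```python
# Minimal sketch (assumed workflow; r and lora_alpha are placeholders, not read from this commit).
from peft import LoraConfig

config = LoraConfig(
    r=16,                    # placeholder rank
    lora_alpha=32,           # placeholder scaling factor
    target_modules=["gate_proj", "up_proj", "down_proj"],  # order does not change which modules are adapted
    task_type="CAUSAL_LM",   # matches "task_type" above
)
```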
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:462da6b1dacec1acdde5d63ab626a28225936bab7d9c87b93cd08f076623b652
+oid sha256:9185204c1ca65430df44f37ad7dc9a7d8607a11b8b14944579a99e1c1861986c
 size 113271504
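Only the Git LFS pointer changes here: a new sha256 for the retrained LoRA weights, with the file size unchanged at 113,271,504 bytes. A minimal sketch of loading this adapter with PEFT; the base-model id and adapter path are placeholders, since neither is named in the diff:

```python
# Minimal sketch (placeholder ids; the actual base model is not named in this diff).
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-llama-model")       # placeholder base model
model = PeftModel.from_pretrained(base, "path/to/this/adapter-repo")  # reads adapter_config.json + adapter_model.safetensors
```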
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7e0a7cd7e3e5186cb2c3598458545087a5cc9c0c97aae42ac53aa6b237e0ae6
+oid sha256:d903b5e84103ffa728fdf2bc9491ed4b4ac2e5d6506c31daddd299090df94467
 size 226609018
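The optimizer state is swapped out as well, again with an unchanged size. As a rough consistency check, 226,609,018 bytes is almost exactly twice the 113,271,504-byte adapter file, which is what an Adam-style optimizer (two moment tensors per trainable parameter at the same precision) would be expected to produce; the optimizer type is an inference from the sizes, not something recorded in this diff:

```python
# Arithmetic-only sanity check using the two sizes shown in this commit.
adapter_bytes = 113_271_504    # adapter_model.safetensors
optimizer_bytes = 226_609_018  # optimizer.pt
print(optimizer_bytes / adapter_bytes)  # ~2.0006; the small excess is presumably param-group and pickle metadata
```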
special_tokens_map.json CHANGED
@@ -13,7 +13,13 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "</s>",
+  "pad_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
tokenizer_config.json CHANGED
@@ -33,7 +33,7 @@
   "eos_token": "</s>",
   "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "</s>",
+  "pad_token": "<unk>",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
trainer_state.json CHANGED
@@ -11,145 +11,145 @@
     {
       "epoch": 0.04,
       "learning_rate": 2e-05,
-      "loss": 1.7128,
+      "loss": 1.7311,
       "step": 2
     },
     {
       "epoch": 0.08,
       "learning_rate": 2e-05,
-      "loss": 1.8515,
+      "loss": 1.8755,
       "step": 4
     },
     {
       "epoch": 0.12,
       "learning_rate": 2e-05,
-      "loss": 1.8735,
+      "loss": 1.9054,
       "step": 6
     },
     {
       "epoch": 0.16,
       "learning_rate": 2e-05,
-      "loss": 2.0181,
+      "loss": 2.052,
       "step": 8
     },
     {
       "epoch": 0.21,
       "learning_rate": 2e-05,
-      "loss": 2.1375,
+      "loss": 2.174,
       "step": 10
     },
     {
       "epoch": 0.25,
       "learning_rate": 2e-05,
-      "loss": 2.3034,
+      "loss": 2.3593,
       "step": 12
     },
     {
       "epoch": 0.29,
       "learning_rate": 2e-05,
-      "loss": 1.6288,
+      "loss": 1.6346,
       "step": 14
     },
     {
       "epoch": 0.33,
       "learning_rate": 2e-05,
-      "loss": 1.5506,
+      "loss": 1.5595,
       "step": 16
     },
     {
       "epoch": 0.37,
       "learning_rate": 2e-05,
-      "loss": 1.8434,
+      "loss": 1.851,
       "step": 18
     },
     {
       "epoch": 0.41,
       "learning_rate": 2e-05,
-      "loss": 1.6927,
+      "loss": 1.6995,
       "step": 20
     },
     {
       "epoch": 0.45,
       "learning_rate": 2e-05,
-      "loss": 1.9721,
+      "loss": 1.9828,
       "step": 22
     },
     {
       "epoch": 0.49,
       "learning_rate": 2e-05,
-      "loss": 1.9983,
+      "loss": 2.0154,
       "step": 24
     },
     {
       "epoch": 0.53,
       "learning_rate": 2e-05,
-      "loss": 1.5436,
+      "loss": 1.5442,
       "step": 26
     },
     {
       "epoch": 0.57,
       "learning_rate": 2e-05,
-      "loss": 1.7244,
+      "loss": 1.7272,
       "step": 28
     },
     {
       "epoch": 0.62,
       "learning_rate": 2e-05,
-      "loss": 1.8803,
+      "loss": 1.8829,
       "step": 30
     },
     {
       "epoch": 0.66,
       "learning_rate": 2e-05,
-      "loss": 1.7137,
+      "loss": 1.7181,
       "step": 32
     },
     {
       "epoch": 0.7,
       "learning_rate": 2e-05,
-      "loss": 1.7194,
+      "loss": 1.7273,
       "step": 34
     },
     {
       "epoch": 0.74,
       "learning_rate": 2e-05,
-      "loss": 1.8995,
+      "loss": 1.9079,
       "step": 36
     },
     {
       "epoch": 0.78,
       "learning_rate": 2e-05,
-      "loss": 1.5938,
+      "loss": 1.5964,
       "step": 38
     },
     {
       "epoch": 0.82,
       "learning_rate": 2e-05,
-      "loss": 1.7493,
+      "loss": 1.751,
       "step": 40
     },
     {
       "epoch": 0.86,
       "learning_rate": 2e-05,
-      "loss": 1.8429,
+      "loss": 1.8439,
       "step": 42
     },
     {
       "epoch": 0.9,
       "learning_rate": 2e-05,
-      "loss": 1.8747,
+      "loss": 1.8702,
       "step": 44
     },
     {
       "epoch": 0.94,
       "learning_rate": 2e-05,
-      "loss": 1.7454,
+      "loss": 1.7397,
       "step": 46
     },
     {
       "epoch": 0.98,
       "learning_rate": 2e-05,
-      "loss": 1.7541,
+      "loss": 1.7357,
       "step": 48
     }
   ],
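Every logged loss value changes, while the schedule around it stays identical: one entry every 2 steps, a constant learning rate of 2e-05, and roughly one epoch (epoch 0.98 at step 48). A minimal sketch of TrainingArguments consistent with this log; all values are inferred from the entries above rather than read from training_args.bin, and the output directory is a placeholder:

```python
# Minimal sketch of arguments consistent with the log above (inferred, not read
# from training_args.bin; output_dir is a placeholder).
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="output-dir",       # placeholder
    learning_rate=2e-5,            # matches "learning_rate": 2e-05 at every step
    lr_scheduler_type="constant",  # the rate never decays in the log
    logging_steps=2,               # entries appear at steps 2, 4, ..., 48
    num_train_epochs=1,            # epoch reaches 0.98 by step 48
)
```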
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34cbce6bccba8280ee82dc757f6360bcde7994e7543d19b82190f9cc8a5487be
+oid sha256:1f7c6a339d11a6181488bf73e4550f6121f9691d255a16fbb92fea537306e476
 size 4728
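training_args.bin is the pickled TrainingArguments object that transformers saves with each checkpoint; only its hash changes here, with the 4,728-byte size constant, as expected when just a few scalar fields differ. A minimal sketch for inspecting it; on recent PyTorch, weights_only=False is needed because the file is a general pickle rather than a tensor archive:

```python
# Minimal sketch: unpickle and inspect the saved TrainingArguments
# (transformers must be installed so the class can be reconstructed).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.logging_steps)
```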