Upload model

- README.md +72 -0
- adapter_config.json +3 -3
- adapter_model.bin +1 -1
README.md CHANGED
@@ -233,4 +233,76 @@ The following `bitsandbytes` quantization config was used during training:
 ### Framework versions


 - PEFT 0.7.0.dev0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
+### Framework versions
+
+
+- PEFT 0.7.0.dev0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
+### Framework versions
+
+
+- PEFT 0.7.0.dev0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
+### Framework versions
+
+
+- PEFT 0.7.0.dev0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
+### Framework versions
+
+
+- PEFT 0.7.0.dev0
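For anyone who wants to reproduce the quantization setup the card records, the sketch below rebuilds it with the `transformers` `BitsAndBytesConfig` API. It is a minimal sketch under assumptions: `"base-model-id"` is a placeholder, since neither the diff nor the card names the base model this adapter was trained against, and the `llm_int8_*` fields are left implicit because the values in the card match the library defaults.

```python
# Minimal sketch of the 4-bit quantization config recorded in this model card.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
)

model = AutoModelForCausalLM.from_pretrained(
    "base-model-id",  # placeholder: the card does not name the base model
    quantization_config=bnb_config,
)
```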
adapter_config.json CHANGED
@@ -16,10 +16,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "self_attn.q_proj",
     "self_attn.k_proj",
-    "self_attn.
-    "self_attn.v_proj"
+    "self_attn.q_proj",
+    "self_attn.v_proj",
+    "self_attn.o_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
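The `target_modules` change above only affects which attention projections the LoRA adapter wraps; when loading, PEFT reads them from `adapter_config.json`, so nothing needs to be restated in code. A hedged loading sketch, reusing the quantized `model` from the previous example and assuming the placeholder repo id `"user/adapter-repo"` stands in for wherever these files live:

```python
from peft import PeftModel

# adapter_config.json (including target_modules: k_proj, q_proj, v_proj,
# o_proj) is read from the checkpoint automatically.
peft_model = PeftModel.from_pretrained(model, "user/adapter-repo")  # placeholder id
peft_model.eval()
```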
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:69986dda42452b5dfc0bc1d22f67db151422af08ce52471674e5284647c74319
 size 33646413
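The `adapter_model.bin` entry is a Git LFS pointer, not the weights themselves: `oid sha256:...` is the SHA-256 digest of the real file and `size` is its byte count, so a downloaded copy can be verified locally. A small stdlib-only check, assuming the binary has already been fetched into the working directory:

```python
import hashlib
import os

# Both values come straight from the LFS pointer in this commit.
EXPECTED_OID = "69986dda42452b5dfc0bc1d22f67db151422af08ce52471674e5284647c74319"
EXPECTED_SIZE = 33646413

assert os.path.getsize("adapter_model.bin") == EXPECTED_SIZE, "size mismatch"
with open("adapter_model.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == EXPECTED_OID, f"oid mismatch: {digest}"
print("adapter_model.bin matches the LFS pointer")
```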