Upload model

Files changed:
- README.md (+84 -0)
- adapter_model.bin (+1 -1)
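For context: a commit with this shape is what PEFT's `push_to_hub` typically produces. It uploads the adapter weights through Git LFS and rewrites the auto-generated model card, which appends the `bitsandbytes` quantization config and a "### Framework versions" entry on each save. A minimal sketch, assuming a hypothetical already-trained `peft.PeftModel` and a placeholder repo id:

from peft import PeftModel  # PEFT 0.4.0.dev0, per the model card below

def upload_adapter(model: PeftModel, repo_id: str) -> None:
    # Commits adapter_model.bin (as a Git LFS pointer) and README.md;
    # repeated pushes append the config block to the card again, which is
    # consistent with the duplicated sections visible in the diff below.
    model.push_to_hub(repo_id)

# upload_adapter(trained_model, "user/adapter-repo")  # placeholder repo id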
README.md
CHANGED

@@ -81,6 +81,83 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: float16
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -93,6 +170,13 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: float16
 ### Framework versions
 
+- PEFT 0.4.0.dev0
+- PEFT 0.4.0.dev0
+- PEFT 0.4.0.dev0
+- PEFT 0.4.0.dev0
+- PEFT 0.4.0.dev0
+- PEFT 0.4.0.dev0
+- PEFT 0.4.0.dev0
 - PEFT 0.4.0.dev0
 - PEFT 0.4.0.dev0
 - PEFT 0.4.0.dev0
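The nine settings recorded in the card map one-to-one onto `transformers.BitsAndBytesConfig`, and all seven appended copies record identical values, so a single block carries the information. A sketch of the equivalent construction, with the values transcribed from the diff above and nothing else assumed:

import torch
from transformers import BitsAndBytesConfig

# Values transcribed from the model card: 4-bit fp4 quantization with double
# quantization, computing in float16; the llm_int8_* fields sit at their
# library defaults.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)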
adapter_model.bin
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7b84f642ca839980d04318d50e84cc6946156ef3313deadf59c8ae5c9cec2b37
 size 324598229
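The LFS pointer above records everything needed to check the artifact after download: its SHA-256 digest and byte size. A sketch that verifies the file and then attaches the adapter to its base model; `user/adapter-repo` is a placeholder (the real repo id is not part of this diff), and the base model id is read from the adapter's own config:

import hashlib

from huggingface_hub import hf_hub_download
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM

REPO_ID = "user/adapter-repo"  # placeholder, not the actual repository

# Verify the downloaded weights against the LFS pointer (oid + size).
path = hf_hub_download(REPO_ID, "adapter_model.bin")
blob = open(path, "rb").read()
assert len(blob) == 324598229
assert hashlib.sha256(blob).hexdigest() == (
    "7b84f642ca839980d04318d50e84cc6946156ef3313deadf59c8ae5c9cec2b37"
)

# Load the base model named in adapter_config.json and attach the adapter.
base_id = PeftConfig.from_pretrained(REPO_ID).base_model_name_or_path
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, REPO_ID)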