xzuyn
committed on
Remove `validate_quantized_dora` (#1485)
Browse files
DoRA with quantized layers is supported with PEFT 0.10.0
src/axolotl/utils/config/models/input/v0_4_1/__init__.py
CHANGED
@@ -242,17 +242,6 @@ class LoraConfig(BaseModel):
 242             raise ValueError("Require cfg.load_in_4bit to be True for qlora")
 243         return self
 244
 245 -   @model_validator(mode="before")
 246 -   @classmethod
 247 -   def validate_quantized_dora(cls, data):
 248 -       if data.get("peft_use_dora") and (
 249 -           data.get("load_in_8bit") or data.get("load_in_4bit")
 250 -       ):
 251 -           raise ValueError(
 252 -               "`peft_use_dora` is not currently compatible with quantized weights."
 253 -           )
 254 -       return data
 255 -
 256
 257 class ReLoRAConfig(BaseModel):
 258     """ReLoRA configuration subset"""

After this change (new line numbering):

 242             raise ValueError("Require cfg.load_in_4bit to be True for qlora")
 243         return self
 244
 245
 246 class ReLoRAConfig(BaseModel):
 247     """ReLoRA configuration subset"""
|