Add load_in_4bit flag for base model
README.md CHANGED
````diff
@@ -44,7 +44,7 @@ To use these weights:
 from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM
 
 config = PeftConfig.from_pretrained("arnavgrg/codealpaca-qlora")
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", load_in_4bit=True)
 model = PeftModel.from_pretrained(model, "arnavgrg/codealpaca-qlora")
 ```
````
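For context, the snippet below is a minimal end-to-end sketch of the updated usage. It assumes a CUDA GPU with `bitsandbytes` installed and access to the gated `meta-llama/Llama-2-7b-hf` checkpoint; the tokenizer setup, prompt text, and generation settings are illustrative additions, not part of the README.

```python
# Minimal sketch of the updated snippet (assumes a CUDA GPU, bitsandbytes
# installed, and access to the gated meta-llama/Llama-2-7b-hf weights).
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model with its weights quantized to 4-bit on the fly.
base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    load_in_4bit=True,   # requires bitsandbytes
    device_map="auto",   # place layers on available devices automatically
)

# Attach the QLoRA adapter weights on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, "arnavgrg/codealpaca-qlora")

# Illustrative prompt and generation settings (not from the README).
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
inputs = tokenizer(
    "Write a Python function that reverses a string.", return_tensors="pt"
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Newer transformers releases prefer `quantization_config=BitsAndBytesConfig(load_in_4bit=True)` over the bare `load_in_4bit` kwarg, which has since been deprecated; both load the base weights in 4-bit.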