Update README.md
README.md
CHANGED
@@ -3,13 +3,21 @@ library_name: peft
 base_model: yanolja/EEVE-Korean-Instruct-10.8B-v1.0
 ---
 
+Dataset used: aihub
+
+Training environment: RTX3090 x 8
+
+epoch: 1
+
+time: 19 hours
+
 ``` python
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 from peft import prepare_model_for_kbit_training, PeftModel, PeftConfig
 
 model_path = 'yanolja/EEVE-Korean-10.8B-v1.0'
-lora_path = '
+lora_path = 'qwopqwop/EEVE-ALMA'
 
 bnb_config = BitsAndBytesConfig(load_in_4bit=True,bnb_4bit_quant_type="nf4",bnb_4bit_compute_dtype=torch.float16,)
 model = AutoModelForCausalLM.from_pretrained(model_path, quantization_config=bnb_config, trust_remote_code=True)
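For context, the README snippet in this diff imports PeftModel and defines lora_path but stops after loading the quantized base model. Below is a minimal sketch of how the adapter would typically be attached on top of it, assuming standard PEFT usage; the PeftModel.from_pretrained and AutoTokenizer calls are not part of the original README and are shown here only as an illustration.

``` python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

model_path = 'yanolja/EEVE-Korean-10.8B-v1.0'
lora_path = 'qwopqwop/EEVE-ALMA'

# 4-bit NF4 quantization config, as in the README snippet
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Load the quantized base model and its tokenizer
model = AutoModelForCausalLM.from_pretrained(
    model_path, quantization_config=bnb_config, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Attach the LoRA adapter to the base model (assumed usage; not shown in the README)
model = PeftModel.from_pretrained(model, lora_path)
model.eval()
```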