Training in progress, epoch 1
- adapter_config.json +2 -2
- adapter_model.safetensors +1 -1
- inference.py +43 -0
- training_args.bin +1 -1
adapter_config.json CHANGED
@@ -19,8 +19,8 @@
     "rank_pattern": {},
     "revision": null,
     "target_modules": [
-        "
-        "
+        "q_proj",
+        "v_proj"
     ],
     "task_type": "CAUSAL_LM"
 }
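This change points the LoRA adapter at the attention query and value projections (q_proj, v_proj) of the causal-LM base model. For reference, a minimal peft sketch that would emit an equivalent adapter_config.json; the rank and scaling values are illustrative placeholders, since the diff above does not show them:

from peft import LoraConfig

# r and lora_alpha are hypothetical placeholders, not values recorded in this commit.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],  # matches the added lines above
    task_type="CAUSAL_LM",
)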
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0506953dfdadc325678dffd6d7163a20b524c979ea5d9a8d63ba5cf94ef9629d
 size 109069176
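The safetensors file itself lives in Git LFS storage; the repository tracks only this pointer, which records the blob's SHA-256 (the oid) and its byte size. A small sketch with a hypothetical helper, assuming the real file has already been pulled, that checks a local copy against the pointer:

import hashlib

def sha256_of(path):
    # Stream the file in 1 MiB chunks so large checkpoints fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "0506953dfdadc325678dffd6d7163a20b524c979ea5d9a8d63ba5cf94ef9629d"
assert sha256_of("adapter_model.safetensors") == expected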
inference.py ADDED
@@ -0,0 +1,43 @@
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+from peft import PeftModel, PeftConfig
+
+
+# LoRA adapter checkpoint written during training
+model_name = "zephyr_instruct_generation/checkpoint-150"
+
+#config = PeftConfig.from_pretrained("Grigorij/mistral_instruct_generation")
+config = PeftConfig.from_pretrained(model_name)
+
+# 4-bit NF4 quantization for the frozen base model
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16
+)
+
+# Load the quantized base model, then attach the trained LoRA adapter
+model = AutoModelForCausalLM.from_pretrained(
+    "HuggingFaceH4/zephyr-7b-beta", quantization_config=bnb_config, device_map='auto', use_cache=False)
+model = PeftModel.from_pretrained(model, model_name)
+
+tokenizer = AutoTokenizer.from_pretrained(
+    "mistralai/Mistral-7B-v0.1",
+    padding=True,
+    padding_side="left",
+    add_eos_token=True,
+    add_bos_token=True,
+)
+tokenizer.pad_token = tokenizer.eos_token
+
+def generate_response(prompt, model):
+    encoded_input = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
+    model_inputs = encoded_input.to('cuda')
+    generated_ids = model.generate(**model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.eos_token_id)
+    decoded_output = tokenizer.batch_decode(generated_ids)
+    return decoded_output[0]
+
+
+response = generate_response("<s><|system|>\n<|user|>\nFront: low rocks, mineral, Right: crater, Left: clear space, Rear: clear space, container: empty, base: rear-left\n<|assistant|>\n", model)
+print(response)
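inference.py builds the Zephyr chat prompt by hand (the <|system|>/<|user|>/<|assistant|> markers in the final call). An alternative sketch using the tokenizer's built-in chat template; this assumes the HuggingFaceH4/zephyr-7b-beta tokenizer, which ships a zephyr-style template, rather than the Mistral tokenizer the script loads:

from transformers import AutoTokenizer

# The base model's tokenizer carries the chat template Zephyr was trained with.
chat_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": ""},
    {"role": "user", "content": "Front: low rocks, mineral, Right: crater, Left: clear space, Rear: clear space, container: empty, base: rear-left"},
]
prompt = chat_tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(generate_response(prompt, model))

Using the template avoids drift between hand-written markers and whatever format the adapter actually saw during training.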
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d65a60e6624671705b0bae42c7861fb7c436e4099fd42d6417622a53e74c5698
 size 4728
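training_args.bin is the pickled TrainingArguments object that the transformers Trainer writes next to every checkpoint; its 4728 bytes are hyperparameters, not weights. A sketch for inspecting it, assuming transformers is installed (unpickling needs its classes on the path):

import torch

# weights_only=False is required on recent PyTorch versions, because the
# file is an arbitrary pickle rather than a tensor archive.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)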