Update README.md
README.md CHANGED
@@ -25,8 +25,46 @@ It achieves the following results on the evaluation set:
- Logits/rejected: -2.5535
- Logits/chosen: -2.7973

Followed the [alignment-handbook](https://github.com/huggingface/alignment-handbook) to perform DPO (Phase 2) over the Zephyr-SFT model.
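
For reference, here is a minimal sketch of what DPO Phase 2 looks like with `trl`'s `DPOTrainer` (trl ~0.7-era API). The SFT checkpoint name, dataset, and hyperparameters below are illustrative assumptions, not the exact juanako recipe:

```python
# Hedged sketch of alignment-handbook-style DPO (Phase 2) using trl's DPOTrainer.
# Checkpoint, dataset and hyperparameters are assumptions for illustration only.
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

sft_checkpoint = "HuggingFaceH4/mistral-7b-sft-beta"  # assumed Zephyr-SFT starting point

model = AutoModelForCausalLM.from_pretrained(sft_checkpoint, torch_dtype=torch.bfloat16)
ref_model = AutoModelForCausalLM.from_pretrained(sft_checkpoint, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(sft_checkpoint)

# DPO expects preference pairs with "prompt", "chosen" and "rejected" columns.
# (The handbook recipe applies the chat template to these columns first; that
# preprocessing step is elided here.)
dataset = load_dataset("HuggingFaceH4/ultrafeedback_binarized", split="train_prefs")

training_args = TrainingArguments(
    output_dir="juanako-dpo",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=8,
    learning_rate=5e-7,
    num_train_epochs=1,
    bf16=True,
    remove_unused_columns=False,  # keep the preference columns for the trainer
)

trainer = DPOTrainer(
    model,
    ref_model,
    args=training_args,
    beta=0.1,  # strength of the implicit KL penalty toward the reference model
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```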

**Please feel free to run more tests and commit the results. Also, if you are interested in participating in [UNA's paper research or GPU sponsorship](mailto:[email protected]), please get in touch.**

Special thanks to [TheBloke](https://huggingface.co/TheBloke) for converting the model into multiple formats and, more broadly, for his enormous contribution to the community.
Here are the models (a short GGUF usage sketch follows the list):
* [juanako-7B-v1-AWQ](https://huggingface.co/TheBloke/juanako-7B-v1-AWQ)
* [juanako-7B-v1-GPTQ](https://huggingface.co/TheBloke/juanako-7B-v1-GPTQ)
* [juanako-7B-v1-GGUF](https://huggingface.co/TheBloke/juanako-7B-v1-GGUF)
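
The GGUF files can be run without a GPU, e.g. via `llama-cpp-python`. A brief sketch; the quantization filename below is an assumption, use whichever file you actually downloaded:

```python
# Hedged sketch: local CPU inference over a downloaded GGUF file.
# The exact filename depends on the quantization chosen from the GGUF repo.
from llama_cpp import Llama

llm = Llama(model_path="juanako-7b-v1.Q4_K_M.gguf", n_ctx=2048)

# Prompt follows the Zephyr-style chat format shown in the usage example below.
out = llm(
    "<|system|>\nYou are a helpful assistant.</s>\n<|user|>\nWhat is DPO?</s>\n<|assistant|>\n",
    max_tokens=128,
)
print(out["choices"][0]["text"])
```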

## Prompt and Inference Usage

```python
# Install transformers from source - only needed for versions <= v4.34
# pip install git+https://github.com/huggingface/transformers.git
# pip install accelerate

import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="fblgit/juanako-7b-v1", torch_dtype=torch.float16, device_map="auto")

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
# <|system|>
# You are a friendly chatbot who always responds in the style of a pirate.</s>
# <|user|>
# How many helicopters can a human eat in one sitting?</s>
# <|assistant|>
# Ah, me hearty matey! But yer question be a puzzler! A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food!
```

## Model description

@@ -222,4 +260,48 @@ hf (pretrained=fblgit/juanako-7b-v1,load_in_4bit=False,dtype=float16), limit: No
| - humanities     |N/A |none |acc |0.5405|± |0.1478|
| - other          |N/A |none |acc |0.6894|± |0.1091|
| - social_sciences|N/A |none |acc |0.7195|± |0.0676|
| - stem           |N/A |none |acc |0.5217|± |0.1149|
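
To re-run or extend these scores, a sketch with the `lm-evaluation-harness` Python API; the arguments mirror the header above (num_fewshot 0, batch_size 8, float16), but the harness version used for the card's numbers is not stated, so treat this as an assumption:

```python
# Hedged sketch: re-running MMLU with lm-evaluation-harness (v0.4+ Python API).
# pip install lm-eval
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=fblgit/juanako-7b-v1,dtype=float16",
    tasks=["mmlu"],  # reports the humanities/other/social_sciences/stem groups
    num_fewshot=0,
    batch_size=8,
)
print(results["results"])
```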

### Citations

@misc{tunstall2023zephyr,
      title={Zephyr: Direct Distillation of LM Alignment},
      author={Lewis Tunstall and Edward Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro von Werra and Clémentine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M. Rush and Thomas Wolf},
      year={2023},
      eprint={2310.16944},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}

@software{eval-harness,
  author       = {Gao, Leo and
                  Tow, Jonathan and
                  Biderman, Stella and
                  Black, Sid and
                  DiPofi, Anthony and
                  Foster, Charles and
                  Golding, Laurence and
                  Hsu, Jeffrey and
                  McDonell, Kyle and
                  Muennighoff, Niklas and
                  Phang, Jason and
                  Reynolds, Laria and
                  Tang, Eric and
                  Thite, Anish and
                  Wang, Ben and
                  Wang, Kevin and
                  Zou, Andy},
  title        = {A framework for few-shot language model evaluation},
  month        = sep,
  year         = 2021,
  publisher    = {Zenodo},
  version      = {v0.0.1},
  doi          = {10.5281/zenodo.5371628},
  url          = {https://doi.org/10.5281/zenodo.5371628}
}

@misc{rafailov2023direct,
      title={Direct Preference Optimization: Your Language Model is Secretly a Reward Model},
      author={Rafael Rafailov and Archit Sharma and Eric Mitchell and Stefano Ermon and Christopher D. Manning and Chelsea Finn},
      year={2023},
      eprint={2305.18290},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}