Update README.md
README.md (CHANGED)
@@ -45,24 +45,38 @@ dtype: bfloat16
## 💻 Usage

```python
# Installation
!pip install -qU transformers accelerate

# Imports
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Loading
tokenizer = AutoTokenizer.from_pretrained("./merge/")
model = AutoModelForCausalLM.from_pretrained("./merge/")

# Completion function
def infer(prompt, **kwargs):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, **kwargs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Some silliness
infer(
    "<|user|>\nBen is going to the store for some Ice Cream. So is Jerry. "
    "They mix up the ice cream at the store. Is the ice cream: "
    "(a. Ben's (b. Jerry's (c. Ben and Jerry's <|end|>\n"
    "<|assistant|>\nMy answer is (",
    max_new_tokens=1024,
)

# A proper test
infer(
    """
<|user|>
Explain what a Mixture of Experts is in less than 100 words.
<|assistant|>
""",
    max_new_tokens=1024,
    do_sample=False,
    temperature=0.0,
    top_k=50,
    top_p=0.89,
)
```
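
The merge config pins `dtype: bfloat16`, so on a GPU it makes sense to load the weights in that precision and let `accelerate` handle placement. A minimal sketch, assuming a CUDA device is available; note that the inputs then have to follow the model:

```python
# Optional: bfloat16 weights on GPU (assumes a CUDA device; device
# placement is handled by the accelerate package installed above)
model = AutoModelForCausalLM.from_pretrained(
    "./merge/",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

def infer(prompt, **kwargs):
    # Same helper as above, except inputs move to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, **kwargs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
```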
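
The prompts above hand-write the `<|user|>`/`<|end|>`/`<|assistant|>` tags. If the merged tokenizer ships a chat template (not a given for a merge), `apply_chat_template` can build the same prompt for you; a sketch under that assumption:

```python
# Build the prompt from the tokenizer's chat template instead of manual
# tags (requires tokenizer.chat_template to be set for this merge)
messages = [
    {"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(infer(prompt, max_new_tokens=1024, do_sample=False))
```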
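
One caveat on the "proper test" call: `do_sample=False` selects greedy decoding, so the `temperature`, `top_k`, and `top_p` values passed there are ignored. To make them count, enable sampling; the values below are illustrative, not tuned for this merge:

```python
# Sampled generation: these knobs only take effect with do_sample=True
print(infer(
    prompt,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.7,
    top_k=50,
    top_p=0.89,
))
```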