Update README.md
README.md CHANGED
````diff
@@ -90,4 +90,30 @@ You are focused on providing systematic, well-reasoned responses. Response Struc
 
 if __name__ == "__main__":
     main()
 ```
+
+Or alternatively:
+```python
+import torch
+from transformers import pipeline
+
+model_id = "CreitinGameplays/Llama-3.1-8B-R1-experimental"
+
+pipe = pipeline(
+    "text-generation",
+    model=model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto"
+)
+
+messages = [{"role": "user", "content": "hello!"}]
+
+outputs = pipe(
+    messages,
+    temperature=0.6,
+    repetition_penalty=1.08,
+    max_new_tokens=2048
+)
+
+print(outputs[0]["generated_text"][-1])
+```
````
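For reference, when the `text-generation` pipeline receives a chat-style `messages` list as in the added example, `outputs[0]["generated_text"]` holds the whole conversation as a list of message dicts, so the final `print` shows the assistant turn. A minimal sketch of pulling out just the reply text, assuming the chat-pipeline output format of recent `transformers` releases:

```python
# Sketch only, assuming chat-style pipeline output from a recent transformers
# release: "generated_text" is the full message list and the last entry is the
# newly generated assistant turn.
assistant_turn = outputs[0]["generated_text"][-1]  # {"role": "assistant", "content": "..."}
print(assistant_turn["content"])                   # just the reply text
```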