Update README.md
Browse files
README.md
CHANGED
@@ -115,14 +115,15 @@ You will first need to install `transformers` and `accelerate` (just to ease the
|
|
115 |
```python
|
116 |
import torch
|
117 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
118 |
-
model = AutoModelForCausalLM.from_pretrained("DRXD1000/Phoenix", torch_dtype=torch.bfloat16, device_map="auto")
|
119 |
-
tokenizer = AutoTokenizer.from_pretrained("DRXD1000/Phoenix")
|
120 |
-
prompt =
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
"""
|
|
|
126 |
inputs = tokenizer.apply_chat_template(prompt, return_tensors="pt").to("cuda")
|
127 |
outputs = model.generate(inputs, num_return_sequences=1, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
128 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
115 |
```python
|
116 |
import torch
|
117 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
118 |
+
model = AutoModelForCausalLM.from_pretrained("DRXD1000/Phoenix-GPTQ", torch_dtype=torch.bfloat16, device_map="auto")
|
119 |
+
tokenizer = AutoTokenizer.from_pretrained("DRXD1000/Phoenix-GPTQ")
|
120 |
+
prompt = [
|
121 |
+
{
|
122 |
+
"role": "system",
|
123 |
+
"content": "", # Not recommended. Phoenix does not react well to system prompts
|
124 |
+
},
|
125 |
+
{"role": "user", "content": "Erkläre mir was KI ist"},
|
126 |
+
]
|
127 |
inputs = tokenizer.apply_chat_template(prompt, return_tensors="pt").to("cuda")
|
128 |
outputs = model.generate(inputs, num_return_sequences=1, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
|
129 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|