Update README.md
README.md
CHANGED
```diff
@@ -77,12 +77,6 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
 
+model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
 
-messages = [
-    {"role": "user", "content": "What is your favourite condiment?"},
-    {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
-    {"role": "user", "content": "Do you have mayonnaise recipes?"}
-]
-
 input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
 
 outputs = model.generate(input_ids, max_new_tokens=20)
```
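For context, here is a minimal sketch of what the surrounding README snippet looks like after this change. The `model_id` value and the example `messages` list below are placeholders, not part of the commit; note that `apply_chat_template` still references `messages`, so a `messages` list must still be defined somewhere in the snippet.

```python
# Sketch of the README snippet after this commit, assuming a CUDA-capable GPU.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-org/your-chat-model"  # placeholder; use the model this README documents

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

# The diff removes the original messages definition, but apply_chat_template still
# expects one, so supply your own chat turns here (placeholder content below).
messages = [
    {"role": "user", "content": "What is your favourite condiment?"},
]

input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
outputs = model.generate(input_ids, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```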