Update README.md
Browse files
README.md
CHANGED
@@ -29,14 +29,13 @@ llm = pipeline(
|
|
29 |
"text-generation",
|
30 |
model=model_id,
|
31 |
torch_dtype=torch.float16,
|
32 |
-
max_new_tokens=512,
|
33 |
device_map="auto",
|
34 |
)
|
35 |
|
36 |
def generate(input_text):
|
37 |
messages = [{"role": "user", "content": input_text}]
|
38 |
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
39 |
-
outputs = llm(prompt)
|
40 |
return outputs[0]["generated_text"][len(prompt):]
|
41 |
|
42 |
generate("Explain quantum tunneling in simple terms.")
|
|
|
29 |
"text-generation",
|
30 |
model=model_id,
|
31 |
torch_dtype=torch.float16,
|
|
|
32 |
device_map="auto",
|
33 |
)
|
34 |
|
35 |
def generate(input_text):
|
36 |
messages = [{"role": "user", "content": input_text}]
|
37 |
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
38 |
+
outputs = llm(prompt, max_new_tokens=512)
|
39 |
return outputs[0]["generated_text"][len(prompt):]
|
40 |
|
41 |
generate("Explain quantum tunneling in simple terms.")
|