SasakiYusuke committed: Update README.md
README.md (CHANGED)
@@ -28,9 +28,7 @@ Of that, 70% was used for training data and 30% for evaluation.
 import torch
 from transformers import pipeline
 
-pipe = pipeline("text-generation", model="
-
-# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
+pipe = pipeline("text-generation", model="Danibasters/TinyLlama-1.1B-Chat-v1.0-PCD250k_v0.1", torch_dtype=torch.bfloat16, device_map="auto")
 
 text = '''Create a program that determines whether a given year is a leap year or not.
 The input is an integer Y (1000 ≤ Y ≤ 2999) representing a year, provided in a single line.
@@ -42,13 +40,13 @@ text = '''Create a program that determines whether a given year is a leap year o
 Rule 4: If none of the above rules (Rule 1-3) apply, the year is not a leap year.
 If a year satisfies multiple rules, the rule with the higher number takes precedence.
 '''
-
+texts = f"Translate the following problem statement into Python code:\n{text}"
 messages = [
     {
         "role": "system",
         "content": "You are a chatbot who can help code!",
     },
-    {"role": "user", "content": f"{
+    {"role": "user", "content": f"{texts}"},
 ]
 prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
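
Not part of the diff, but worth noting when running the updated snippet: the text-generation pipeline returns a list with one dict per generated sequence, and by default "generated_text" contains the prompt followed by the completion. A minimal sketch of reading the generated code back, assuming the `prompt` and `outputs` variables from the snippet above:

```python
# Each element of `outputs` is a dict; "generated_text" holds the full
# prompt plus the model's completion (return_full_text defaults to True).
generated = outputs[0]["generated_text"]

# Drop the prompt prefix to keep only the newly generated code.
print(generated[len(prompt):])
```

Passing `return_full_text=False` in the `pipe(...)` call would return only the completion and make the slicing unnecessary.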