MDDDDR/Llama-3.2-1B-Instruct-FFT-coder-python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = 'MDDDDR/Llama-3.2-1B-Instruct-FFT-coder-python'

# Load the tokenizer and model; bfloat16 halves memory use versus float32.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id,
                                             device_map="cuda:0",
                                             torch_dtype=torch.bfloat16)
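
# No CUDA device? A reasonable fallback (an assumption about your hardware,
# not part of the original card) is to let accelerate place the weights:
# model = AutoModelForCausalLM.from_pretrained(model_id,
#                                              device_map="auto",
#                                              torch_dtype=torch.bfloat16)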
# The instruction is in Korean, the language the model targets. In English:
# "The LCS (Longest Common Subsequence) problem is: given two sequences, find
#  the longest sequence that is a subsequence of both. For example, the LCS
#  of ACAYKP and CAPCAK is ACAK.
#  ###Input: the first and second lines each contain a string of uppercase
#  letters only, at most 1000 characters long.
#  ###Output: print the length of the LCS of the two given strings.
#  ###Example input: ACAYKP / CAPCAK   ###Example output: 4"
instruction = '''LCS(Longest Common Subsequence, 최장 공통 부분 수열) 문제는 두 수열이 주어졌을 때, 모두의 부분 수열이 되는 수열 중 가장 긴 것을 찾는 문제이다.
예를 들어, ACAYKP와 CAPCAK의 LCS는 ACAK가 된다.
###입력 : 첫째 줄과 둘째 줄에 두 문자열이 주어진다. 문자열은 알파벳 대문자로만 이루어져 있으며, 최대 1000글자로 이루어져 있다.
###출력 : 첫째 줄에 입력으로 주어진 두 문자열의 LCS의 길이를 출력한다.
###입력 예제 :
ACAYKP
CAPCAK
###출력 예제 : 4
'''
messages = [
    {
        "role": "user",
        # Korean prompt wrapper; in English: "Below is an instruction that
        # describes a problem. Please answer this request appropriately.
        # \n###Instruction:{instruction}\n###Answer:"
        "content": "아래는 문제를 설명하는 지시사항입니다. 이 요청에 대해 적절하게 답변해주세요.\n###지시사항:{instruction}\n###답변:".format(instruction=instruction)
    }
]
with torch.no_grad():
    # add_generation_prompt=True appends the assistant header, so the model
    # starts an answer instead of continuing the user turn.
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt", padding=False).to('cuda')
    outputs = model.generate(**inputs,
                             use_cache=True,       # KV cache speeds up decoding
                             max_new_tokens=256,   # max_length would also count the long prompt
                             do_sample=True,       # needed for top_p/temperature to take effect
                             top_p=0.9,
                             temperature=0.7,
                             repetition_penalty=1.0,
                             # fall back to EOS when no pad token is defined
                             pad_token_id=tokenizer.pad_token_id
                             if tokenizer.pad_token_id is not None
                             else tokenizer.eos_token_id)
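
# Alternatively (an assumption about your transformers version, not shown in
# the original card), recent releases can build and tokenize the chat in one
# step, skipping the intermediate prompt string:
# inputs = tokenizer.apply_chat_template(messages,
#                                        add_generation_prompt=True,
#                                        return_dict=True,
#                                        return_tensors="pt").to(model.device)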
output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
# Keep only the text after the final "답변:" ("Answer:") marker.
final_output = output_text.split('답변:')[-1].strip()
print(final_output)
# Example output:
# ```python
# def longest_common_subsequence(str1, str2):
# m = len(str1)
# n = len(str2)
# dp = [[0] * (n+1) for _ in range(m+1)]
#
# for i in range(m+1):
# for j in range(n+1):
# if i == 0 or j == 0:
# dp[i][j] = 0
# elif str1[i-1] == str2[j-1]:
# dp[i][j] = dp[i-1][j-1] + 1
# else:
# dp[i][j] = max(dp[i-1][j], dp[i][j-1])
#
# return dp[m][n]
#
# print(longest_common_subsequence("ACAYKP", "CAPCAK")) # Output: 4
# ```
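
For interactive use, the same setup supports token-by-token streaming. A minimal sketch, assuming transformers' TextStreamer utility (not part of the original card):

from transformers import TextStreamer

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
with torch.no_grad():
    # Tokens are printed to stdout as they are generated; reuses `inputs`.
    model.generate(**inputs,
                   streamer=streamer,
                   max_new_tokens=256,
                   do_sample=True,
                   top_p=0.9,
                   temperature=0.7)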
Base model: meta-llama/Llama-3.2-1B-Instruct