# Text2Long_text.py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# 1. Device setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# 2. Load the Korean GPT-2 model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)
# 3. Korean story generation function
def generate_korean_story(prompt, max_length=300):
    # Encode the prompt and move it to the same device as the model
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        input_ids,
        max_length=max_length,
        min_length=100,
        do_sample=True,            # sample instead of greedy decoding
        temperature=0.9,           # soften the distribution for more varied output
        top_k=50,                  # keep only the 50 most likely tokens per step
        top_p=0.95,                # nucleus sampling cutoff
        repetition_penalty=1.2,    # discourage repeated tokens
        no_repeat_ngram_size=3,    # forbid repeated 3-grams
        eos_token_id=tokenizer.eos_token_id
    )
    story = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return story
# 4. Run
if __name__ == "__main__":
    user_prompt = input("📜 Enter the opening sentence of the story (in Korean): ")
    result = generate_korean_story(user_prompt, max_length=500)
    print("\n📖 Generated Korean story:\n")
    print(result)