Commit · 895d3e8
Parent(s): 60099e5
won
app.py CHANGED
@@ -1,7 +1,37 @@
- import
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # 1. Device setup: use the GPU when one is available
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # 2. Load the Korean GPT-2 model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
+ model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)
+
+ # 3. Korean story generation function
+ def generate_korean_story(prompt, max_length=300):
+     input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
+
+     outputs = model.generate(
+         input_ids,
+         max_length=max_length,
+         min_length=100,
+         do_sample=True,            # sample rather than decode greedily
+         temperature=0.9,           # values below 1.0 sharpen the distribution
+         top_k=50,                  # keep only the 50 most likely tokens
+         top_p=0.95,                # nucleus sampling cutoff
+         repetition_penalty=1.2,    # penalize tokens that already appeared
+         no_repeat_ngram_size=3,    # never repeat the same 3-gram
+         eos_token_id=tokenizer.eos_token_id
+     )
+
+     story = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return story
+
+ # 4. Run
+ if __name__ == "__main__":
+     user_prompt = input("Enter an opening sentence for the story (in Korean): ")
+     result = generate_korean_story(user_prompt, max_length=500)
+
+     print("\nGenerated Korean story:\n")
+     print(result)
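
For intuition about the sampling settings in this commit, below is a minimal standalone sketch, not part of the diff, of the top-k / top-p (nucleus) filtering that model.generate applies to the logits at each decoding step when do_sample=True. The helper name filter_logits and the toy numbers are illustrative assumptions.

import torch

def filter_logits(logits, top_k=50, top_p=0.95):
    # Illustrative helper (not from the commit): the usual top-k then
    # top-p filtering over a 1-D logits vector.
    logits = logits.clone()
    if top_k > 0:
        # Mask everything below the k-th largest logit.
        kth = torch.topk(logits, min(top_k, logits.size(-1))).values[-1]
        logits = logits.masked_fill(logits < kth, float("-inf"))
    # Of the survivors, keep the smallest set whose probabilities sum to top_p.
    sorted_logits, sorted_idx = torch.sort(logits, descending=True)
    cum_probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
    drop = cum_probs > top_p
    drop[1:] = drop[:-1].clone()   # shift so the token crossing top_p is kept
    drop[0] = False                # always keep the single most likely token
    logits[sorted_idx[drop]] = float("-inf")
    return logits

logits = torch.tensor([2.0, 1.5, 0.3, -1.0, -2.0])    # toy 5-token vocabulary
probs = torch.softmax(filter_logits(logits, top_k=3, top_p=0.95), dim=-1)
next_token = torch.multinomial(probs, num_samples=1)  # sample one token id

The temperature=0.9 setting acts before this filtering: the logits are divided by 0.9, which slightly sharpens the distribution that the top-k / top-p cutoffs then operate on.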