Update app.py
app.py
CHANGED
@@ -1,11 +1,11 @@
 import gradio as gr
 from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
+# Pre-load the model and tokenizer as global variables
+model = GPT2LMHeadModel.from_pretrained("gpt2")
+tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+
 def generate_diary(keywords):
-    # Load the model and tokenizer
-    model = GPT2LMHeadModel.from_pretrained("gpt2")
-    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
-
     # Keyword-based fine-tuning
     input_ids = tokenizer.encode(" ".join(keywords), return_tensors="pt")
     output = model.generate(input_ids, max_length=500, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, num_beams=5)
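The change hoists the two from_pretrained calls out of generate_diary, so the GPT-2 weights are downloaded and loaded once at import time rather than on every call. The hunk ends at the model.generate(...) line, so the rest of the file is not shown; the sketch below is a minimal, assumed completion of the updated app.py, where the decoding step, the comma-splitting diary_app wrapper, and the gr.Interface wiring are illustrative guesses rather than part of this commit.

import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Pre-load the model and tokenizer as global variables: the load cost is
# paid once at startup instead of on every generate_diary call
model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

def generate_diary(keywords):
    # Keyword-conditioned generation: the joined keywords seed the prompt
    input_ids = tokenizer.encode(" ".join(keywords), return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=500,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_beams=5,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
    )
    # Assumed continuation: the hunk ends at model.generate(...), so this
    # decoding step is a guess at what the rest of the file does
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Hypothetical Gradio wiring, not shown in the hunk: split a comma-separated
# textbox value into the keyword list that generate_diary expects
def diary_app(keyword_text):
    keywords = [k.strip() for k in keyword_text.split(",") if k.strip()]
    return generate_diary(keywords)

demo = gr.Interface(fn=diary_app, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()

One caveat worth flagging: passing both do_sample=True and num_beams=5 requests beam-sample decoding, whose behavior varies across transformers releases; picking either pure sampling (num_beams=1) or pure beam search (do_sample=False) is the more conventional choice.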