Text2Long_text.py  CHANGED  (+2 -2)
@@ -9,7 +9,7 @@ tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
 model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)
 
 # 3. Korean story generation function
-def generate_korean_story(prompt, max_length=300):
+def generate_korean_story(prompt, max_length=100):
     input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
 
     outputs = model.generate(
@@ -31,7 +31,7 @@ def generate_korean_story(prompt, max_length=300):
 # 4. Run
 if __name__ == "__main__":
     user_prompt = input("📖 Enter the opening sentence of the story (in Korean): ")
-    result = generate_korean_story(user_prompt, max_length=300)
+    result = generate_korean_story(user_prompt, max_length=100)
 
     print("\n📖 Generated Korean story:\n")
     print(result)
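For readers who only see this diff: the sketch below shows how the whole script plausibly fits together after the change to max_length=100. The arguments inside model.generate are not visible here (the first hunk cuts off at the opening call), so the sampling settings shown (do_sample, top_k, top_p, temperature, repetition_penalty, no_repeat_ngram_size, pad_token_id) are assumptions for illustration, not the Space's actual configuration; only the device setup, model loading, function signature, and the __main__ block mirror what the diff shows.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# 1. Pick GPU if available, otherwise CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 2. Load the KoGPT2 tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")
model = AutoModelForCausalLM.from_pretrained("skt/kogpt2-base-v2").to(device)

# 3. Korean story generation function
def generate_korean_story(prompt, max_length=100):
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

    # Assumed sampling settings; the real values are not shown in the diff.
    outputs = model.generate(
        input_ids,
        max_length=max_length,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.8,
        repetition_penalty=1.2,
        no_repeat_ngram_size=3,
        pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# 4. Run
if __name__ == "__main__":
    user_prompt = input("📖 Enter the opening sentence of the story (in Korean): ")
    result = generate_korean_story(user_prompt, max_length=100)

    print("\n📖 Generated Korean story:\n")
    print(result)

Lowering max_length from 300 to 100 caps generation at 100 tokens total (prompt included), which shortens outputs and reduces per-request latency on the Space.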