Update app.py
Browse files
app.py
CHANGED
@@ -1,14 +1,14 @@
|
|
1 |
-
import gradio as gr
|
2 |
import torch
|
3 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
4 |
|
5 |
# Check if GPU is available, otherwise use CPU
|
6 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
7 |
|
8 |
# Load pre-trained GPT-2 model and tokenizer
|
9 |
model_name = "gpt2-large"
|
10 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
11 |
-
model = AutoModelForCausalLM.from_pretrained(model_name)
|
12 |
|
13 |
|
14 |
def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
|
@@ -35,7 +35,7 @@ def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_re
|
|
35 |
return generated_text
|
36 |
|
37 |
|
38 |
-
def generate_text_with_nucleus_search(input_text, max_length=
|
39 |
"""
|
40 |
Generate text with nucleus sampling based on the given input text.
|
41 |
|
@@ -58,14 +58,11 @@ def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True,
|
|
58 |
|
59 |
|
60 |
# Create Gradio interface
|
61 |
-
|
62 |
-
|
63 |
|
64 |
-
gr.Interface(
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
description="Enter some text and generate new text using GPT-2 model.",
|
70 |
-
allow_flagging=False
|
71 |
-
).launch(share=True)
|
|
|
|
|
"""Gradio demo that serves GPT-2 (gpt2-large) text generation."""
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# GPU selection is disabled in this revision, so inference runs on CPU.
# NOTE(review): to use CUDA, re-enable the line below and move the model
# with model.to(device) — confirm the target hardware first.
# device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pre-trained GPT-2 weights and tokenizer once at import time,
# so every request served by the interface reuses the same objects.
model_name = "gpt2-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
|
|
|
35 |
return generated_text
|
36 |
|
37 |
|
38 |
+
def generate_text_with_nucleus_search(input_text, max_length=128, do_sample=True, top_p=0.9):
|
39 |
"""
|
40 |
Generate text with nucleus sampling based on the given input text.
|
41 |
|
|
|
58 |
|
59 |
|
60 |
# Create Gradio interface
|
61 |
+
input_text = gr.Textbox(lines=10, label="Input Text", placeholder="Enter text for text generation...")
|
62 |
+
output_text = gr.Textbox(label="Generated Text")
|
63 |
|
64 |
+
gr.Interface(generate_text, input_text, output_text,
|
65 |
+
title="Text Generation with GPT-2",
|
66 |
+
description="Generate text using the GPT-2 model.",
|
67 |
+
theme="huggingface",
|
68 |
+
allow_flagging="never").launch(share=True)
|
|
|
|
|
|