ajeetkumar01 commited on
Commit
b492a07
·
verified ·
1 Parent(s): 8d9979f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -14
app.py CHANGED
@@ -1,14 +1,14 @@
1
- import gradio as gr
2
  import torch
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
4
 
5
  # Check if GPU is available, otherwise use CPU
6
- device = "cuda" if torch.cuda.is_available() else "cpu"
7
 
8
  # Load pre-trained GPT-2 model and tokenizer
9
  model_name = "gpt2-large"
10
  tokenizer = AutoTokenizer.from_pretrained(model_name)
11
- model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
12
 
13
 
14
  def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
@@ -35,7 +35,7 @@ def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_re
35
  return generated_text
36
 
37
 
38
- def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True, top_p=0.9):
39
  """
40
  Generate text with nucleus sampling based on the given input text.
41
 
@@ -58,14 +58,11 @@ def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True,
58
 
59
 
60
  # Create Gradio interface
61
- input_textbox = gr.Textbox(lines=7, label="Input Text", placeholder="Enter your text here...")
62
- output_textbox = gr.Textbox(label="Generated Text", placeholder="Generated text will appear here...")
63
 
64
- gr.Interface(
65
- [generate_text, generate_text_with_nucleus_search],
66
- inputs=input_textbox,
67
- outputs=output_textbox,
68
- title="Text Generation with GPT-2",
69
- description="Enter some text and generate new text using GPT-2 model.",
70
- allow_flagging=False
71
- ).launch(share=True)
 
 
1
  import torch
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import gradio as gr
4
 
5
# Model runs on CPU: the explicit .to(device) placement was intentionally
# dropped here — presumably the hosting environment has no GPU. TODO confirm.

# Load the pre-trained GPT-2 Large tokenizer and causal-LM weights once,
# at module import time, so every request reuses the same model instance.
model_name = "gpt2-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
12
 
13
 
14
  def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
 
35
  return generated_text
36
 
37
 
38
+ def generate_text_with_nucleus_search(input_text, max_length=128, do_sample=True, top_p=0.9):
39
  """
40
  Generate text with nucleus sampling based on the given input text.
41
 
 
58
 
59
 
60
# Build the Gradio web UI: a single input text box wired to generate_text,
# with the model's output shown in a second text box.
input_text = gr.Textbox(lines=10, label="Input Text", placeholder="Enter text for text generation...")
output_text = gr.Textbox(label="Generated Text")

# NOTE(review): theme="huggingface" and allow_flagging="never" are Gradio 3.x
# options; Gradio 4+ removed the named theme and renamed allow_flagging to
# flagging_mode — confirm the pinned gradio version before upgrading.
demo = gr.Interface(
    generate_text,
    input_text,
    output_text,
    title="Text Generation with GPT-2",
    description="Generate text using the GPT-2 model.",
    theme="huggingface",
    allow_flagging="never",
)
demo.launch(share=True)