Jonathanmann committed on
Commit
9712ef3
·
verified ·
1 Parent(s): afeb272

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -1,38 +1,39 @@
1
- import os
2
  import gradio as gr
3
  from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
 
4
 
 
5
  HF_TOKEN = os.getenv("HF_TOKEN")
 
6
 
7
- # Load the tokenizer and model explicitly as GPT2 to avoid config issues
8
- model_name = 'Jonathanmann/GPT2-medium-SADnov21'
9
  tokenizer = GPT2Tokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)
10
  model = GPT2LMHeadModel.from_pretrained(model_name, use_auth_token=HF_TOKEN)
11
 
12
  # Define the text generation pipeline
13
- generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
14
 
15
  # Define a function for generating text
16
  def generate_text(prompt, max_length, temperature, top_k, top_p):
17
  response = generator(
18
- prompt,
19
  max_length=max_length,
20
  temperature=temperature,
21
  top_k=top_k,
22
  top_p=top_p,
23
  num_return_sequences=1
24
  )
25
- return response[0]['generated_text']
26
 
27
- # Define the Gradio interface
28
  demo = gr.Interface(
29
  fn=generate_text,
30
  inputs=[
31
  gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
32
- gr.Slider(minimum=20, maximum=200, default=50, label="Max Length"),
33
- gr.Slider(minimum=0.1, maximum=1.0, default=0.7, label="Temperature"),
34
- gr.Slider(minimum=1, maximum=100, default=50, label="Top-k"),
35
- gr.Slider(minimum=0.1, maximum=1.0, default=0.9, label="Top-p")
36
  ],
37
  outputs="text",
38
  title="GPT-2 Text Generation",
 
 
1
  import gradio as gr
2
  from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
3
+ import os
4
 
5
+ # Define your model details
6
  HF_TOKEN = os.getenv("HF_TOKEN")
7
+ model_name = "Jonathanmann/GPT2-medium-SADnov21"
8
 
9
+ # Load tokenizer and model from Hugging Face
 
10
  tokenizer = GPT2Tokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)
11
  model = GPT2LMHeadModel.from_pretrained(model_name, use_auth_token=HF_TOKEN)
12
 
13
  # Define the text generation pipeline
14
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
15
 
16
  # Define a function for generating text
17
  def generate_text(prompt, max_length, temperature, top_k, top_p):
18
  response = generator(
19
+ prompt,
20
  max_length=max_length,
21
  temperature=temperature,
22
  top_k=top_k,
23
  top_p=top_p,
24
  num_return_sequences=1
25
  )
26
+ return response[0]["generated_text"]
27
 
28
+ # Create the Gradio interface
29
  demo = gr.Interface(
30
  fn=generate_text,
31
  inputs=[
32
  gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
33
+ gr.Slider(minimum=20, maximum=200, value=50, label="Max Length"),
34
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature"),
35
+ gr.Slider(minimum=1, maximum=100, value=50, label="Top-k"),
36
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.9, label="Top-p")
37
  ],
38
  outputs="text",
39
  title="GPT-2 Text Generation",