savan360 committed (verified)
Commit 54a5af7 · 1 Parent(s): b8e372e

Update app.py

Files changed (1)
  1. app.py +7 -10
app.py CHANGED
@@ -1,29 +1,26 @@
 import gradio as gr
 from transformers import pipeline
 
-# Create a text-generation pipeline using GPT-2
+# Create a text-generation pipeline using GPT-2 with modified parameters
 generator = pipeline('text-generation', model='gpt2')
 
 def generate_text(prompt):
-    # Adjust temperature to make output more focused
+    # Use a lower temperature and limit the max_length for concise output
     generated = generator(
         prompt,
-        max_length=50,
-        num_return_sequences=1,
-        temperature=0.2,  # Lower temperature for less randomness
-        top_k=50,  # Optional: limit the number of choices
-        top_p=0.95  # Optional: nucleus sampling
+        max_length=30,  # Limit the maximum length of the output
+        do_sample=False,  # Disable sampling for deterministic output
+        temperature=0.2  # Lower temperature to reduce randomness
     )
     return generated[0]['generated_text']
 
-
-# Create a Gradio interface with one text input and one text output
+# Create a Gradio interface
 iface = gr.Interface(
     fn=generate_text,
     inputs="text",
     outputs="text",
     title="Simple LLM with Hugging Face & Gradio",
-    description="Enter a prompt and get text generated by a basic GPT-2 model."
+    description="Enter a prompt and get a concise text generated by GPT-2."
 )
 
 # Launch the interface
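
For context, a minimal standalone sketch of the updated generation call (the prompt string below is hypothetical and only for illustration; it assumes transformers is installed and the gpt2 weights can be downloaded). Note that with do_sample=False the pipeline uses greedy decoding, so the temperature value committed here has no practical effect, and recent transformers releases may warn that sampling flags are unused.

# Minimal sketch of the generation call after this commit.
from transformers import pipeline

generator = pipeline('text-generation', model='gpt2')

result = generator(
    "Once upon a time",  # hypothetical prompt for illustration
    max_length=30,       # cap on total length (prompt tokens + generated tokens)
    do_sample=False,     # greedy decoding; temperature/top_k/top_p are not applied
)
print(result[0]['generated_text'])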