ManojINaik committed on
Commit 309768d · verified · 1 Parent(s): 959e25e
Files changed (1)
  1. app.py +13 -9
app.py CHANGED
@@ -1,20 +1,24 @@
  import gradio as gr
  from transformers import pipeline
 
- # Load a model (use a suitable model available on Hugging Face)
- model_name = "gpt2" # Change to your desired model
+ # Load a more suitable model for conversational responses
+ model_name = "gpt2" # You might want to try 'gpt-neo' or 'gpt-3.5-turbo' if available
  generator = pipeline("text-generation", model=model_name)
 
  # Inference function
- def generate_text(prompt):
-     return generator(prompt, max_length=50)[0]['generated_text']
+ def generate_response(prompt):
+     # Generate text with a more structured approach
+     response = generator(prompt, max_length=100, num_return_sequences=1)[0]['generated_text']
+     return response.strip() # Clean up any leading/trailing whitespace
 
  # Gradio interface
- interface = gr.Interface(fn=generate_text,
-                          inputs="text",
-                          outputs="text",
-                          title="Text Generation with GPT-2",
-                          description="Enter a prompt to generate text.")
+ interface = gr.Interface(
+     fn=generate_response,
+     inputs="text",
+     outputs="text",
+     title="Conversational LLM",
+     description="Enter a message to receive a relevant response."
+ )
 
  # Launch the interface
  interface.launch()
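
For reviewers, the updated generate_response logic can be sanity-checked outside the Gradio UI with a short script like the sketch below. It is not part of the commit; it assumes the transformers package is installed locally and reuses the same "gpt2" checkpoint as app.py, and the test prompt is purely illustrative.

# Minimal local check of the new inference logic (sketch, not part of the commit)
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

def generate_response(prompt):
    # Mirrors the updated app.py: longer max_length, single sequence, stripped output
    response = generator(prompt, max_length=100, num_return_sequences=1)[0]['generated_text']
    return response.strip()

if __name__ == "__main__":
    # Illustrative prompt only; any short text works
    print(generate_response("Hello! Tell me something interesting."))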