ManojINaik committed on
Commit d56e863 · verified · 1 Parent(s): 309768d
Files changed (1):
  1. app.py (+13 -6)

app.py CHANGED
@@ -1,15 +1,22 @@
 import gradio as gr
 from transformers import pipeline
 
-# Load a more suitable model for conversational responses
-model_name = "gpt2"  # You might want to try 'gpt-neo' or 'gpt-3.5-turbo' if available
+# Load the model
+model_name = "gpt2"
 generator = pipeline("text-generation", model=model_name)
 
 # Inference function
 def generate_response(prompt):
-    # Generate text with a more structured approach
-    response = generator(prompt, max_length=100, num_return_sequences=1)[0]['generated_text']
-    return response.strip()  # Clean up any leading/trailing whitespace
+    # Generate text with specific parameters
+    response = generator(
+        prompt,
+        max_length=150,  # Increase max length for more comprehensive responses
+        num_return_sequences=1,
+        temperature=0.7,  # Lower for more deterministic responses
+        top_k=50,  # Consider the top 50 tokens for diversity
+        top_p=0.95  # Cumulative probability for diversity
+    )
+    return response[0]['generated_text'].strip()  # Clean up the output
 
 # Gradio interface
 interface = gr.Interface(
@@ -17,7 +24,7 @@ interface = gr.Interface(
     inputs="text",
     outputs="text",
     title="Conversational LLM",
-    description="Enter a message to receive a relevant response."
+    description="Enter a prompt to generate a relevant and coherent response."
 )
 
 # Launch the interface
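
Note (not part of this commit): a minimal local smoke test for the updated generate_response, under one assumption — do_sample=True is added here, since temperature, top_k, and top_p only influence generation when sampling is enabled; the example prompt is likewise hypothetical.

# Local smoke test sketch; do_sample=True and the prompt below are assumptions, not from the commit.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

def generate_response(prompt):
    # Same parameters as the committed app.py, plus do_sample=True so they take effect
    response = generator(
        prompt,
        max_length=150,
        num_return_sequences=1,
        do_sample=True,   # assumption: enable sampling so temperature/top_k/top_p apply
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    return response[0]['generated_text'].strip()

if __name__ == "__main__":
    print(generate_response("Tell me something interesting about space."))  # hypothetical prompt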