# manojapi / app.py — Hugging Face Space by ManojINaik (commit 309768d, "Update2", 786 bytes)
import gradio as gr
from transformers import pipeline
# Load a small causal language model for text generation.
# NOTE(review): the original comment suggested 'gpt-3.5-turbo' as an
# alternative, but that is an OpenAI API-only model and cannot be loaded
# with transformers; a local alternative would be e.g. 'EleutherAI/gpt-neo-125m'.
model_name = "gpt2"
# Downloads the checkpoint on first run; the pipeline bundles tokenizer + model.
generator = pipeline("text-generation", model=model_name)
# Inference function
def generate_response(prompt):
    """Generate a text continuation for *prompt* and return it as a string.

    Args:
        prompt: The user's input message.

    Returns:
        The model-generated continuation, with the echoed prompt removed
        and surrounding whitespace stripped.
    """
    # max_new_tokens bounds only the generated continuation; the original
    # max_length=100 also counted the prompt's tokens, which errors out (or
    # leaves no room to generate) once the prompt approaches 100 tokens.
    result = generator(prompt, max_new_tokens=100, num_return_sequences=1)
    text = result[0]['generated_text']
    # text-generation pipelines return the prompt prepended to the output;
    # drop it so the caller only sees the model's reply.
    if text.startswith(prompt):
        text = text[len(prompt):]
    return text.strip()  # Clean up any leading/trailing whitespace
# Assemble the Gradio UI: a single text box in, a single text box out,
# wired to the generation function above.
_ui_config = dict(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Conversational LLM",
    description="Enter a message to receive a relevant response.",
)
interface = gr.Interface(**_ui_config)
# Start the web server for the interface.
interface.launch()