import gradio as gr
from transformers import pipeline
# Load a more suitable model for conversational responses
model_name = "gpt2"  # A larger open model such as "EleutherAI/gpt-neo-1.3B" can be swapped in here; API-only models like gpt-3.5-turbo cannot be loaded through a transformers pipeline
generator = pipeline("text-generation", model=model_name)
# Inference function
def generate_response(prompt):
    # Generate text with a more structured approach
    response = generator(prompt, max_length=100, num_return_sequences=1)[0]['generated_text']
    return response.strip()  # Clean up any leading/trailing whitespace
# Gradio interface
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Conversational LLM",
    description="Enter a message to receive a relevant response."
)
# Launch the interface
interface.launch()
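
# A minimal sketch of querying the app programmatically once it is running,
# assuming a local launch on Gradio's default port 7860, the gradio_client
# package installed, and the default /predict endpoint that gr.Interface
# exposes; the prompt text below is only an illustrative example.

from gradio_client import Client

# Connect to the locally running Gradio app (default host/port assumed)
client = Client("http://127.0.0.1:7860/")

# The single text input maps to the interface's default /predict endpoint
result = client.predict("Hello, how are you?", api_name="/predict")
print(result)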