# basic_llm / app.py
import gradio as gr
from transformers import pipeline
# Use a different LLM (GPT-Neo instead of GPT-2)
generator = pipeline('text-generation', model='EleutherAI/gpt-neo-125M')

def generate_text(prompt):
    generated = generator(
        prompt,
        max_new_tokens=20,       # Limit the length of the generated continuation
        do_sample=False,         # Greedy decoding: deterministic output (no sampling randomness)
        repetition_penalty=2.0   # Penalize repeated tokens
    )
    return generated[0]['generated_text']
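
# Quick sanity check (a minimal sketch; the prompt below is only an illustration).
# Running it executes the model once at startup, so keep it commented out in the
# deployed Space:
# print(generate_text("What is the capital of France?"))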
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Ask Any Question",
    description="Ask a question and get an answer using GPT-Neo."
)
iface.launch()
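
# To run locally (a minimal sketch, assuming the usual dependencies are installed):
#   pip install gradio transformers torch
#   python app.py
# Gradio serves the interface at http://127.0.0.1:7860 by default.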