# basic_llm / app.py — savan360's Hugging Face Space (commit ef74a61, 839 bytes)
import gradio as gr
from transformers import pipeline
# Build the GPT-2 text-generation pipeline once at import time so the model
# is loaded a single time and reused across all requests.
generator = pipeline('text-generation', model='gpt2')
def generate_text(prompt):
    """Generate a short GPT-2 continuation of *prompt*.

    Args:
        prompt: User-supplied text to continue.

    Returns:
        The prompt followed by up to 30 newly generated tokens, as one
        string (the pipeline's ``generated_text`` field). Returns "" for
        an empty prompt.
    """
    # Guard: the pipeline is not useful (and may warn/error) on empty input.
    if not prompt:
        return ""
    generated = generator(
        prompt,
        # max_new_tokens counts only *generated* tokens. The previous
        # max_length=30 counted the prompt too, so prompts of ~30 tokens
        # or more produced little or no output (and triggered a
        # deprecation warning in recent transformers releases).
        max_new_tokens=30,
        do_sample=True,          # sampling for more natural responses
        temperature=0.3,         # low temperature -> less randomness
        repetition_penalty=1.5,  # penalize repeated tokens
        no_repeat_ngram_size=2,  # avoid repeating any 2-token sequence
    )
    return generated[0]['generated_text']
# Wire the generator into a minimal single-textbox Gradio UI and serve it.
_interface_config = {
    "fn": generate_text,
    "inputs": "text",
    "outputs": "text",
    "title": "Simple LLM with Hugging Face & Gradio",
    "description": "Enter a prompt and get a concise, factual answer.",
}
iface = gr.Interface(**_interface_config)
iface.launch()