import gradio as gr
from transformers import pipeline

# Create a text-generation pipeline using GPT-2 with modified decoding parameters
generator = pipeline('text-generation', model='gpt2')

def generate_text(prompt):
    # Use a low temperature and limit max_length for concise output
    generated = generator(
        prompt,
        max_length=30,     # Cap the total length (prompt + generated tokens)
        do_sample=True,    # Enable sampling so the temperature setting takes effect
        temperature=0.2    # Low temperature reduces randomness
    )
    return generated[0]['generated_text']

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Simple LLM with Hugging Face & Gradio",
    description="Enter a prompt and get concise text generated by GPT-2."
)

# Launch the interface
iface.launch()
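As an optional sanity check (a minimal sketch; the prompt string below is only illustrative, not part of the demo), the generation function can be called directly before launching the UI to confirm the model loads and produces text:

    print(generate_text("Once upon a time, there was a"))

Note that `iface.launch()` blocks while the local server runs; Gradio can also expose the demo through a temporary public link by passing `share=True` to `launch()`.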