# Source: davidgaofc's Hugging Face Space "app", revision 964d3aa (760 Bytes).
# NOTE(review): the lines above this file's imports were Hugging Face web-UI
# residue ("picture / raw / history blame") captured during extraction; they
# are preserved here as a comment so the module parses.
import gradio as gr
from transformers import pipeline
# Load your Hugging Face model
# Module-level side effect: downloads/loads the model weights at import time.
# 'gpt2' is a placeholder checkpoint; swap in any text-generation model id.
model = pipeline('text-generation', model='gpt2') # Replace 'gpt2' with your model
def predict(input_text, max_length=50):
    """Generate a text continuation for *input_text* with the loaded pipeline.

    Parameters
    ----------
    input_text : str
        Prompt to feed to the text-generation model.
    max_length : int, optional
        Maximum total token length of the generated sequence (default 50,
        matching the original hard-coded value; now overridable by callers).

    Returns
    -------
    str
        The generated text of the first (and only) returned sequence.
    """
    # The pipeline returns a list of dicts, one per generated sequence;
    # we request the default single sequence and unwrap its text.
    output = model(input_text, max_length=max_length)
    return output[0]['generated_text']
# Create the Gradio interface.
# Fix: `gr.inputs.Textbox` is the deprecated Gradio 1.x/2.x component
# namespace, removed in Gradio 4.x — components are now constructed
# directly from the top-level `gr` module.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=2, placeholder="Type something here..."),
    outputs='text',
    title="Hugging Face Model Inference",
    description="Type in some text and see how the model responds!",
)

# Launch the web server only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()