# llama / app.py
# hereoncollab's Hugging Face Space — commit "Update app.py" (279e516, verified)
# Original file size: 727 bytes
# NOTE(review): the lines above were file-viewer scrape residue (raw/history/blame
# links); converted to comments so this file is valid Python.
import gradio as gr
from transformers import pipeline
# Initialize the text generation pipeline with the Gemma 2-2B IT model
# Module-level load: downloads/loads the model once at startup, not per request.
# NOTE(review): google/gemma-2-2b-it is a gated model on the Hub — presumably the
# Space has an access token configured; verify in the Space settings.
pipe = pipeline("text-generation", model="google/gemma-2-2b-it")
def generate_response(user_input):
    """Generate a text completion for the user's prompt via the Gemma pipeline.

    Args:
        user_input: Raw prompt string from the UI textbox.

    Returns:
        The pipeline's generated text (the prompt followed by the model's
        continuation), or "" for an empty/whitespace-only prompt.
    """
    # Guard: don't invoke the model on an empty prompt.
    if not user_input or not user_input.strip():
        return ""
    # Fix: max_new_tokens bounds only the *generated* tokens. The original
    # max_length=100 counted the prompt tokens too, so prompts near 100 tokens
    # left little or no room for a reply.
    response = pipe(user_input, max_new_tokens=100, num_return_sequences=1)
    # The pipeline returns a list of dicts, one per sequence; take the first.
    return response[0]['generated_text']
# Wire up the Gradio UI: one multi-line textbox in, plain text out.
prompt_box = gr.Textbox(label="prompt:", lines=2, placeholder="prompt")

interface = gr.Interface(
    fn=generate_response,
    inputs=prompt_box,
    outputs="text",
    title="Gemma",
    description="Prompt gemma-2b",
)

# Start the app server (blocks until shutdown).
interface.launch()