# TejAndrewsACC's Hugging Face Space — app.py (commit 8c965cd, verified)
import gradio as gr
from huggingface_hub import InferenceClient
import os
# Read the Hugging Face API token from the environment.
# Fail fast with a clear message when HF_KEY is unset, instead of the
# opaque AttributeError that .strip() on None would raise.
hf_key = os.getenv("HF_KEY")
if not hf_key:
    raise RuntimeError("HF_KEY environment variable is not set or empty")
hf_key = hf_key.strip()
# Initialize the Hugging Face Inference API client with the token
client = InferenceClient(api_key=hf_key)
# Define the chatbot function
def chat_with_model(text, image_url=None):
    """Send a (text, optional image URL) prompt to the vision model.

    Parameters
    ----------
    text : str
        The user's text prompt.
    image_url : str or None
        Optional URL of an image to include with the prompt. Empty
        strings (as produced by a blank Gradio textbox) are ignored.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # The OpenAI-style multimodal chat format expects ONE user message
    # whose "content" is a LIST of content parts (text + image_url),
    # not a bare dict and not a separate message per part — otherwise
    # the image is detached from the prompt it belongs to.
    content = [{"type": "text", "text": text}]
    if image_url:
        content.append({
            "type": "image_url",
            "image_url": {"url": image_url},
        })
    messages = [{"role": "user", "content": content}]
    # Get response from the model
    response = client.chat.completions.create(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        messages=messages,
        max_tokens=500
    )
    return response.choices[0].message["content"]
# Build the Gradio front end: two text inputs (prompt + optional image
# URL) wired to chat_with_model, one text output for the reply.
message_box = gr.Textbox(label="Enter your message")
image_url_box = gr.Textbox(label="Optional Image URL (Leave empty if not needed)")
reply_box = gr.Textbox(label="Response from the chatbot")

ui = gr.Interface(
    fn=chat_with_model,
    inputs=[message_box, image_url_box],
    outputs=reply_box,
    title="AI Chatbot with Image Processing",
)

# Start the Gradio server
ui.launch()