import gradio as gr
from huggingface_hub import InferenceClient
import os
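
# Gradio chat app: sends a text prompt (and optionally an image URL) to
# meta-llama/Llama-3.2-11B-Vision-Instruct through the Hugging Face Inference API.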

# Read the Hugging Face API token from the HF_KEY environment variable
hf_key = os.getenv("HF_KEY", "").strip()
# Initialize the Hugging Face client
client = InferenceClient(api_key=hf_key)

# Define the chatbot function
def chat_with_model(text, image_url=None):
    # Build a single user message whose content is a list of parts:
    # the text prompt, plus an image part when an image URL is provided
    content = [{"type": "text", "text": text}]
    if image_url:
        content.append({
            "type": "image_url",
            "image_url": {"url": image_url}
        })
    messages = [{"role": "user", "content": content}]

    # Get the response from the vision model
    response = client.chat.completions.create(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        messages=messages,
        max_tokens=500
    )

    return response.choices[0].message.content

# Create the Gradio interface
ui = gr.Interface(
    fn=chat_with_model,
    inputs=[
        gr.Textbox(label="Enter your message"),
        gr.Textbox(label="Optional Image URL (Leave empty if not needed)")
    ],
    outputs=gr.Textbox(label="Response from the chatbot"),
    title="AI Chatbot with Image Processing"
)

# Launch the UI
ui.launch()