|
import os |
|
import gradio as gr |
|
from huggingface_hub import InferenceClient |
|
|
|
|
|
# Hugging Face inference client, routed through the SambaNova serverless
# provider (hosts the LLaMA 4 checkpoints used below).
# NOTE: os.environ["HF_TOKEN"] raises KeyError at import time if the
# token is unset — the app fails fast rather than at first request.
client = InferenceClient(

    provider="sambanova",

    api_key=os.environ["HF_TOKEN"],

)
|
|
|
def llama4_image_chat(
    image_url,
    question,
    model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
):
    """Ask a multimodal chat model a question about an image.

    Args:
        image_url: Publicly reachable URL of the image to analyze.
        question: Natural-language question about the image.
        model: Hugging Face model id to query. Defaults to the LLaMA 4
            Maverick checkpoint this demo was built around; parameterized
            so the same helper can target other vision-capable models.

    Returns:
        The assistant's text reply (content of the first completion choice).
    """
    # OpenAI-style multimodal message: a single user turn whose content is
    # a list of parts — the text prompt plus an image_url reference.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": question},
                {
                    "type": "image_url",
                    "image_url": {"url": image_url},
                },
            ],
        }
    ]

    # Uses the module-level `client` (SambaNova provider); this is a
    # blocking network call.
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
    )

    return completion.choices[0].message.content
|
|
|
# --- Gradio UI ---------------------------------------------------------
# Layout: title/instructions, a row with two text inputs (image URL and
# question), a submit button, and a multi-line response box.
with gr.Blocks() as demo:

    gr.Markdown("## 🦙 LLaMA 4 Visual Chat")

    gr.Markdown("Upload an image URL and ask a question.")



    # Inputs rendered side by side.
    with gr.Row():

        image_url_input = gr.Textbox(label="Image URL", placeholder="Paste image URL here...")

        question_input = gr.Textbox(label="Question", placeholder="e.g., Describe this image in one sentence.")



    submit_btn = gr.Button("Ask LLaMA 4")

    output_box = gr.Textbox(label="Response", lines=6)



    # Wire the button: inputs map positionally to llama4_image_chat's
    # (image_url, question) parameters; the returned string fills output_box.
    submit_btn.click(fn=llama4_image_chat, inputs=[image_url_input, question_input], outputs=output_box)



# Start the Gradio server (blocking call; runs when the module executes).
demo.launch()
|
|