import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read your Hugging Face token from the HF_TOKEN secret
client = InferenceClient(
    provider="sambanova",
    api_key=os.environ["HF_TOKEN"],
)

def llama4_image_chat(image_url, question):
    # Build a multimodal user message: the question text plus the image URL
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": question},
                {
                    "type": "image_url",
                    "image_url": {"url": image_url}
                }
            ]
        }
    ]
    completion = client.chat.completions.create(
        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
        messages=messages
    )
    return completion.choices[0].message.content

with gr.Blocks() as demo:
    gr.Markdown("## 🦙 LLaMA 4 Visual Chat")
    gr.Markdown("Paste an image URL and ask a question.")

    with gr.Row():
        image_url_input = gr.Textbox(label="Image URL", placeholder="Paste image URL here...")
        question_input = gr.Textbox(label="Question", placeholder="e.g., Describe this image in one sentence.")

    submit_btn = gr.Button("Ask LLaMA 4")
    output_box = gr.Textbox(label="Response", lines=6)

    # Send the image URL and question to the model; show the answer in the output box
    submit_btn.click(fn=llama4_image_chat, inputs=[image_url_input, question_input], outputs=output_box)

demo.launch()