Mubbashir Ahmed committed on
Commit
5196faf
·
1 Parent(s): cbd9126

updated app

Browse files
Files changed (2) hide show
  1. .gitignore +1 -0
  2. app.py +34 -27
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ BACKUP.py
app.py CHANGED
@@ -2,44 +2,51 @@ import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
# Read your HF token from secret
client = InferenceClient(
    api_key=os.environ["HF_TOKEN"],
    provider="sambanova",
)
10
 
11
def llama4_image_chat(image_url, question):
    """Ask the Llama-4 Maverick model a question about the image at *image_url*.

    Returns the model's text reply.
    """
    # One user turn carrying the text question plus the image reference.
    user_content = [
        {"type": "text", "text": question},
        {"type": "image_url", "image_url": {"url": image_url}},
    ]
    completion = client.chat.completions.create(
        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
        messages=[{"role": "user", "content": user_content}],
    )
    return completion.choices[0].message.content
31
 
 
 
 
 
 
 
 
 
 
 
32
# Gradio front-end: image URL + question, wired to llama4_image_chat.
with gr.Blocks() as demo:
    gr.Markdown("## 🦙 LLaMA 4 Visual Chat")
    gr.Markdown("Upload an image URL and ask a question.")

    with gr.Row():
        url_box = gr.Textbox(label="Image URL", placeholder="Paste image URL here...")
        question_box = gr.Textbox(label="Question", placeholder="e.g., Describe this image in one sentence.")

    ask_button = gr.Button("Ask LLaMA 4")
    answer_box = gr.Textbox(label="Response", lines=6)

    ask_button.click(
        fn=llama4_image_chat,
        inputs=[url_box, question_box],
        outputs=answer_box,
    )

demo.launch()
 
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
# Authenticate with Hugging Face
client = InferenceClient(
    api_key=os.environ["HF_TOKEN"],
    provider="sambanova",
)
10
 
11
def llama4_chat(image_url, text_prompt):
    """Send a user prompt (optionally with an image URL) to Llama-4 Maverick.

    Parameters
    ----------
    image_url : str or None
        Optional URL of an image to attach to the message.
    text_prompt : str or None
        The user's question or instruction; required.

    Returns
    -------
    str
        The model's reply, a validation hint when the prompt is empty,
        or an ``"Error: ..."`` string if the API call fails.
    """
    # Treat whitespace-only input the same as a missing prompt — the
    # original `if not text_prompt` let "   " through to the API.
    if not text_prompt or not text_prompt.strip():
        return "Please enter a text question or prompt."

    message_content = [{"type": "text", "text": text_prompt}]

    # The image part is optional; ignore blank/whitespace-only URLs and
    # trim padding so the provider receives a clean URL.
    if image_url and image_url.strip():
        message_content.append({
            "type": "image_url",
            "image_url": {"url": image_url.strip()}
        })

    messages = [{"role": "user", "content": message_content}]

    try:
        completion = client.chat.completions.create(
            model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
            messages=messages
        )
        return completion.choices[0].message.content
    # Broad catch is deliberate: surface any API failure as text in the
    # Gradio output box instead of crashing the app.
    except Exception as e:
        return f"Error: {e}"
34
# Gradio UI: text prompt plus an optional image URL, wired to llama4_chat.
with gr.Blocks() as demo:
    gr.Markdown("## 🦙 LLaMA 4 Chat (Text + Optional Image URL)")
    gr.Markdown("Enter a text prompt. Optionally provide an image URL.")

    with gr.Row():
        prompt_box = gr.Textbox(
            label="Text Prompt / Question",
            placeholder="What is happening in the image?",
            lines=2,
        )
        url_box = gr.Textbox(
            label="Optional Image URL",
            placeholder="https://example.com/image.jpg",
        )

    generate_button = gr.Button("Generate Response")
    response_box = gr.Textbox(label="LLaMA 4 Response", lines=8)

    # Argument order matters: llama4_chat takes (image_url, text_prompt).
    generate_button.click(
        fn=llama4_chat,
        inputs=[url_box, prompt_box],
        outputs=response_box,
    )

demo.launch()