TejAndrewsACC committed on
Commit
50bada8
·
verified ·
1 Parent(s): 0847a82

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from huggingface_hub import InferenceClient
import os

# Read the Hugging Face API token from the environment.
# Fail fast with a clear message: the original `os.getenv("HF_KEY").strip()`
# raises an opaque AttributeError on None when HF_KEY is unset.
hf_key = os.getenv("HF_KEY")
if hf_key is None:
    raise RuntimeError("HF_KEY environment variable is not set")
hf_key = hf_key.strip()

# Initialize the Hugging Face client
client = InferenceClient(api_key=hf_key)
# Define the chatbot function
def chat_with_model(text, image=None):
    """Send *text* (and optionally a PIL *image*) to the vision model.

    Parameters
    ----------
    text : str
        The user's message.
    image : PIL.Image.Image or None
        Optional image; when given it is PNG-encoded as a base64 data URI
        and sent together with the text in a single multimodal message.

    Returns
    -------
    str
        The model's reply text.
    """
    # Chat-completion "content" must be a LIST of typed parts. The original
    # sent a bare dict, used a nonstandard "image_base64" part type, and put
    # the image in a separate second user message — vision chat expects one
    # user message whose content list mixes "text" and "image_url" parts.
    content = [{"type": "text", "text": text}]

    # If an image is uploaded, encode it and send it as part of the payload
    if image is not None:
        import base64
        from io import BytesIO

        # Convert the PIL image to a base64-encoded PNG data URI.
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

        content.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/png;base64,{img_str}"},
        })

    messages = [{"role": "user", "content": content}]

    # Get response from the model
    response = client.chat.completions.create(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        messages=messages,
        max_tokens=500,
    )

    # Prefer attribute access on the returned message object; dict-style
    # indexing (`message["content"]`) is the deprecated compatibility path.
    return response.choices[0].message.content
# Build the Gradio front end: a text prompt plus an optional image input,
# wired to chat_with_model, with a single text box for the model's reply.
message_box = gr.Textbox(label="Enter your message")
image_box = gr.Image(type="pil", label="Upload an Image (Optional)")
reply_box = gr.Textbox(label="Response from the chatbot")

ui = gr.Interface(
    fn=chat_with_model,
    inputs=[message_box, image_box],
    outputs=reply_box,
    title="AI Chatbot with Direct Image Upload",
)

# Start serving the UI.
ui.launch()