Update app.py
app.py
CHANGED
@@ -67,6 +67,7 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from PIL import Image
 import io
+import base64
 
 client = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
 
@@ -91,14 +92,15 @@ def respond(
     # Add the current user message
     messages.append({"role": "user", "content": message})
 
-    # Convert the
+    # Convert the image to a base64-encoded string
     image_bytes = io.BytesIO()
     image.save(image_bytes, format='PNG')
     image_bytes.seek(0)
+    image_base64 = base64.b64encode(image_bytes.getvalue()).decode('utf-8')
 
     # Use InferenceClient to handle the image and text input to the model
-    # Pass the
-    response_data = client.text_to_image(images=
+    # Pass the base64-encoded image as the input
+    response_data = client.text_to_image(images=image_base64, prompt=message)  # Pass the base64 string as 'images'
 
     # Process the response from the model
     response = ""
@@ -122,4 +124,3 @@ demo = gr.ChatInterface(
 
 if __name__ == "__main__":
     demo.launch(share=True)  # Set share=True to create a public link
-
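A caution on the new call: in the huggingface_hub releases I'm aware of, text_to_image is the text-to-image generation helper (it takes a prompt and returns a PIL image) and has no images parameter, so the committed line would raise a TypeError at runtime. Sending an image plus text to a vision-chat model such as Llama-3.2-11B-Vision-Instruct normally goes through chat_completion, with the base64 PNG embedded as a data: URL. A minimal sketch under that assumption; the respond name and the message/image parameters mirror the diff, and max_tokens is an illustrative choice:

import base64
import io

from huggingface_hub import InferenceClient
from PIL import Image

client = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")

def respond(message: str, image: Image.Image) -> str:
    # Encode the PIL image as PNG bytes, then base64, as in the diff above
    image_bytes = io.BytesIO()
    image.save(image_bytes, format="PNG")
    image_base64 = base64.b64encode(image_bytes.getvalue()).decode("utf-8")

    # Vision-instruct endpoints accept OpenAI-style chat messages; the image
    # travels inside the user turn as a data: URL
    completion = client.chat_completion(
        messages=[{
            "role": "user",
            "content": [
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
                {"type": "text", "text": message},
            ],
        }],
        max_tokens=512,  # illustrative cap on the reply length
    )
    return completion.choices[0].message.content

One small note on the diff itself: image_bytes.getvalue() returns the whole buffer regardless of the cursor position, so the retained image_bytes.seek(0) only matters if the buffer is later consumed with image_bytes.read().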