Update app.py
app.py CHANGED
@@ -7,16 +7,12 @@ from huggingface_hub import InferenceClient
 # Initialize the Hugging Face Inference Client
 client = InferenceClient("microsoft/llava-med-7b-delta")
 
-#
-
-
-
-
-
-    buffered = io.BytesIO()
-    image.save(buffered, format="PNG")
-    img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
-    return img_str
+# Function to encode image as base64 (optional if Gradio handles image conversion)
+def image_to_base64(image):
+    buffered = io.BytesIO()
+    image.save(buffered, format="PNG")
+    img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
+    return img_str
 
 # Function to interact with LLAVA model
 def respond(
@@ -39,14 +35,8 @@ def respond(
     messages.append({"role": "user", "content": message})
 
     if image:
-        #
-
-        image_b64 = Base64ImageField().preprocess(image)
-        messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
-    else:
-        for img in image:
-            image_b64 = Base64ImageField().preprocess(img)
-            messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
+        # Gradio handles image processing internally, so no need for manual base64 encoding
+        messages.append({"role": "user", "content": "Image uploaded"})
 
     # Call Hugging Face model for response
     try:
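
For reference, the image_to_base64 helper kept by the first hunk can be exercised on its own. The sketch below is illustrative rather than part of app.py: it assumes Pillow is installed, and the 1x1 test image under the __main__ guard is a made-up example.

import base64
import io

from PIL import Image


def image_to_base64(image):
    # Serialize the PIL image into an in-memory PNG buffer
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    # Base64-encode the PNG bytes and return them as a UTF-8 string
    img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
    return img_str


if __name__ == "__main__":
    # Illustrative check only: encode a synthetic 1x1 white image
    sample = Image.new("RGB", (1, 1), color="white")
    print(image_to_base64(sample)[:32] + "...")  # start of the base64 payload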
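
The second hunk simplifies respond: when an image is present it now appends only a text placeholder, relying on Gradio's own image handling instead of the removed Base64ImageField logic, so the model call receives text messages only. A minimal sketch of that flow follows; because the diff truncates the respond signature and the try block, the parameters and the chat_completion call shown here are illustrative assumptions, not the committed code.

from huggingface_hub import InferenceClient

client = InferenceClient("microsoft/llava-med-7b-delta")


def respond(message, image=None, system_message="You are a helpful assistant."):
    # NOTE: the real signature in app.py is truncated in the diff; these
    # parameters are guesses for illustration only.
    messages = [{"role": "system", "content": system_message}]
    messages.append({"role": "user", "content": message})

    if image:
        # Gradio handles image processing internally, so no manual base64 step
        messages.append({"role": "user", "content": "Image uploaded"})

    # Call Hugging Face model for response; chat_completion is an assumption,
    # since the actual call inside the try block is not shown in the diff.
    try:
        result = client.chat_completion(messages=messages, max_tokens=512)
        return result.choices[0].message.content
    except Exception as err:
        return f"Inference request failed: {err}"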