Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ from huggingface_hub import InferenceClient
 # Initialize the Hugging Face Inference Client
 client = InferenceClient("microsoft/llava-med-7b-delta")
 
-# Function to encode image as base64
+# Function to encode image as base64
 def image_to_base64(image):
     buffered = io.BytesIO()
     image.save(buffered, format="PNG")
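For reference, the hunk above cuts off mid-function, before the encoding step that gives image_to_base64 its name. A minimal sketch of how such a helper typically completes, assuming it returns a base64 string (the return line is not shown in the diff):

import base64
import io

from PIL import Image

def image_to_base64(image: Image.Image) -> str:
    # Serialize the PIL image into an in-memory PNG buffer
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    # Assumed completion: encode the PNG bytes as a base64 ASCII string
    return base64.b64encode(buffered.getvalue()).decode("utf-8")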
@@ -35,8 +35,14 @@ def respond(
     messages.append({"role": "user", "content": message})
 
     if image:
-        #
-
+        # Convert image(s) to base64
+        if isinstance(image, Image.Image):
+            image_b64 = image_to_base64(image)
+            messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
+        else:
+            for img in image:
+                image_b64 = image_to_base64(img)
+                messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
 
     # Call Hugging Face model for response
     try:
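The try block that opens at the end of this hunk is elided. A plausible sketch of the call it wraps, assuming the huggingface_hub chat_completion API and that max_tokens, temperature, and top_p are parameters of respond (they match the sliders in the next hunk, but none of this is confirmed by the diff):

# Hypothetical body of the try block: stream tokens from the Inference API.
response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=max_tokens,
    temperature=temperature,
    top_p=top_p,
    stream=True,
):
    # Accumulate the streamed delta tokens into the reply
    token = chunk.choices[0].delta.content
    if token:
        response += token

Note that the custom "image" key on the message dicts above is not part of the standard chat message schema, so the serving backend would have to accept that shape.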
@@ -78,22 +84,4 @@ try:
         ],
         outputs=[
             gr.Textbox(label="Response", placeholder="Model response will appear here..."),
-            gr.Image(label="
-        ],
-        title="LLAVA Model - Medical Image and Question",
-        description="Upload a medical image and ask a specific question about the image for a medical description.",
-        additional_inputs=[
-            gr.Textbox(label="System message", value="You are a friendly Chatbot."),
-            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
-        ]
-    )
-
-    # Launch the Gradio interface
-    if __name__ == "__main__":
-        print("Launching Gradio interface...")
-        demo.launch()
-
-except Exception as e:
-    print(f"Error during Gradio setup: {str(e)}")
+            gr.Image(label="Gene
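The added line in this hunk is cut off by the page, and the diff never shows the opening of the interface definition. For orientation, a hedged reconstruction of how the removed block fit together, assuming a gr.Interface wired to the respond function; the input widgets are assumptions, the rest is taken from the removed lines:

import gradio as gr

try:
    demo = gr.Interface(
        fn=respond,
        inputs=[
            # Assumed input widgets; the diff does not show this list
            gr.Textbox(label="Question"),
            gr.Image(type="pil", label="Medical image"),
        ],
        outputs=[
            gr.Textbox(label="Response", placeholder="Model response will appear here..."),
        ],
        title="LLAVA Model - Medical Image and Question",
        description="Upload a medical image and ask a specific question about the image for a medical description.",
        # additional_inputs as in the removed lines (a gr.Interface
        # parameter in recent Gradio releases)
        additional_inputs=[
            gr.Textbox(label="System message", value="You are a friendly Chatbot."),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
    )

    # Launch the Gradio interface
    if __name__ == "__main__":
        print("Launching Gradio interface...")
        demo.launch()

except Exception as e:
    print(f"Error during Gradio setup: {str(e)}")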