leilaaaaa committed
Commit 1a70836 · verified · 1 Parent(s): 127dbc3

Update app.py

Files changed (1):
  1. app.py (+10 -22)
app.py CHANGED
@@ -32,10 +32,8 @@ def respond(
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content": message})
-
     if image:
-        # Convert image(s) to base64
+        # Convert image to base64
         if isinstance(image, Image.Image):
             image_b64 = image_to_base64(image)
             messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
@@ -44,9 +42,10 @@ def respond(
             image_b64 = image_to_base64(img)
             messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
 
+    messages.append({"role": "user", "content": message})
+
     try:
         responses = []
-        generated_image = None
 
         for response in client.chat_completion(
             messages,
@@ -58,18 +57,10 @@
             token = response.choices[0].delta.content
             responses.append(token)
 
-            # Check if the response contains an image to be displayed
-            if response.choices[0].delta.image:
-                image_b64 = response.choices[0].delta.image
-                image_data = base64.b64decode(image_b64)
-                generated_image = Image.open(io.BytesIO(image_data))
-                # Optionally convert to RGB if needed
-                # generated_image = generated_image.convert("RGB")
-
-        return responses, generated_image
+        return responses
 
     except Exception as e:
-        return [str(e)], None
+        return [str(e)]
 
 # Debugging print statements
 print("Starting Gradio interface setup...")
@@ -78,15 +69,12 @@ try:
     demo = gr.Interface(
         fn=respond,
         inputs=[
-            gr.Textbox(label="Message"),
-            gr.Image(label="Upload Medical Image (Optional)", type="pil")
-        ],
-        outputs=[
-            gr.Textbox(label="Response", placeholder="Model response will appear here..."),
-            gr.Image(label="Generated Image", type="pil", output=True)
+            gr.Image(label="Upload Medical Image", type="pil", optional=True),
+            gr.Textbox(label="Message")
         ],
-        title="LLAVA Model - Medical Image and Text",
-        description="Upload a medical image and ask a specific question about the image or provide text input for a medical description.",
+        outputs=gr.Textbox(label="Response", placeholder="Model response will appear here..."),
+        title="LLAVA Model - Medical Image and Question",
+        description="Upload a medical image and ask a specific question about the image for a medical description.",
         additional_inputs=[
             gr.Textbox(label="System message", value="You are a friendly Chatbot."),
             gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
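
Note: both sides of the diff call an image_to_base64 helper that is defined elsewhere in app.py and not shown in these hunks. A minimal sketch of what such a helper typically looks like, assuming the image is serialized as a base64-encoded PNG string (the exact format used by this Space is not visible in the diff):

import base64
import io

from PIL import Image

def image_to_base64(image: Image.Image) -> str:
    # Write the PIL image into an in-memory PNG buffer, then base64-encode it as text.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")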