leilaaaaa committed on
Commit
9e4b9cd
·
verified ·
1 Parent(s): 1dcce9a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -8
app.py CHANGED
@@ -35,18 +35,18 @@ def respond(
35
  messages.append({"role": "user", "content": message})
36
 
37
  if image:
38
- # Convert image to base64
39
  if isinstance(image, Image.Image):
40
  image_b64 = image_to_base64(image)
41
  messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
42
  else:
43
- # Handle multiple images if necessary
44
  for img in image:
45
  image_b64 = image_to_base64(img)
46
  messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
47
 
48
  # Call Hugging Face model for response
49
  try:
 
50
  for response in client.chat_completion(
51
  messages,
52
  max_tokens=max_tokens,
@@ -54,11 +54,20 @@ def respond(
54
  temperature=temperature,
55
  top_p=top_p,
56
  ):
57
- if response.choices:
58
- token = response.choices[0].delta.content
59
- yield token
 
 
 
 
 
 
 
 
 
60
  except Exception as e:
61
- yield str(e)
62
 
63
  # Debugging print statements
64
  print("Starting Gradio interface setup...")
@@ -68,9 +77,12 @@ try:
68
  fn=respond,
69
  inputs=[
70
  gr.Textbox(label="Message"),
71
- gr.Image(label="Upload Medical Image", type="pil")
 
 
 
 
72
  ],
73
- outputs=gr.Textbox(label="Response", placeholder="Model response will appear here..."),
74
  title="LLAVA Model - Medical Image and Question",
75
  description="Upload a medical image and ask a specific question about the image for a medical description.",
76
  additional_inputs=[
 
35
  messages.append({"role": "user", "content": message})
36
 
37
  if image:
38
+ # Convert image(s) to base64
39
  if isinstance(image, Image.Image):
40
  image_b64 = image_to_base64(image)
41
  messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
42
  else:
 
43
  for img in image:
44
  image_b64 = image_to_base64(img)
45
  messages.append({"role": "user", "content": "Image uploaded", "image": image_b64})
46
 
47
  # Call Hugging Face model for response
48
  try:
49
+ responses = []
50
  for response in client.chat_completion(
51
  messages,
52
  max_tokens=max_tokens,
 
54
  temperature=temperature,
55
  top_p=top_p,
56
  ):
57
+ token = response.choices[0].delta.content
58
+ responses.append(token)
59
+
60
+ # Check if the response contains an image to be displayed
61
+ if response.choices[0].delta.image:
62
+ image_b64 = response.choices[0].delta.image
63
+ image_data = base64.b64decode(image_b64)
64
+ image = Image.open(io.BytesIO(image_data))
65
+ yield responses, image
66
+ else:
67
+ yield responses, None
68
+
69
  except Exception as e:
70
+ yield [str(e)], None
71
 
72
  # Debugging print statements
73
  print("Starting Gradio interface setup...")
 
77
  fn=respond,
78
  inputs=[
79
  gr.Textbox(label="Message"),
80
+ gr.Image(label="Upload Medical Image", type="pil", optional=True)
81
+ ],
82
+ outputs=[
83
+ gr.Textbox(label="Response", placeholder="Model response will appear here..."),
84
+ gr.Image(label="Generated Image", type="pil", output=True)
85
  ],
 
86
  title="LLAVA Model - Medical Image and Question",
87
  description="Upload a medical image and ask a specific question about the image for a medical description.",
88
  additional_inputs=[