Update app.py
app.py CHANGED
@@ -585,26 +585,35 @@ class XylariaChat:
 
     def streaming_response(message, chat_history, image_filepath, math_ocr_image_path):
         if message.strip().lower() == "/image":
-            chat_history.append([message,
-            yield "", chat_history, None, None
-
-            image_prompt =
+            chat_history.append([message, loading_svg])  # Display loading SVG
+            yield "", chat_history, None, None
+
+            image_prompt = ""
+            # Find the last user message before the /image command for context
+            for i in range(len(chat_history) - 2, -1, -1):
+                if chat_history[i][0] and chat_history[i][0].strip().lower() != "/image":
+                    image_prompt = chat_history[i][0]
+                    break
+
             if not image_prompt:
                 image_prompt = "A realistic image"
 
             image_bytes = self.generate_image(image_prompt)
+
             if isinstance(image_bytes, bytes):
                 base64_image = base64.b64encode(image_bytes).decode("utf-8")
                 image_html = f'<img src="data:image/png;base64,{base64_image}" alt="Generated Image" style="max-width: 100%; max-height: 400px;">'
+
+                # Replace loading SVG with the generated image
                 chat_history[-1][1] = image_html
 
                 self.conversation_history.append(ChatMessage(role="user", content=message).to_dict())
                 self.conversation_history.append(ChatMessage(role="assistant", content=image_html).to_dict())
-
-                yield "", chat_history, None, None
+
+                yield "", chat_history, None, None
             else:
-                chat_history[-1][1] = image_bytes
-                yield "", chat_history, None, None
+                chat_history[-1][1] = image_bytes  # Error message
+                yield "", chat_history, None, None
             return
 
         ocr_text = ""
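
The inline rendering above comes down to base64-encoding the raw image bytes into a data URI that the chatbot can display as HTML. A minimal sketch of that step, assuming image_bytes already holds PNG data from the image backend (the helper name to_image_html is illustrative, not part of app.py):

import base64

def to_image_html(image_bytes: bytes) -> str:
    # Illustrative helper, not in app.py: inline raw PNG bytes as an <img> data URI.
    base64_image = base64.b64encode(image_bytes).decode("utf-8")
    return (
        f'<img src="data:image/png;base64,{base64_image}" '
        f'alt="Generated Image" style="max-width: 100%; max-height: 400px;">'
    )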
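
self.generate_image itself is not part of this hunk; the changed code only assumes it returns raw image bytes on success and an error string otherwise. A hypothetical sketch under that assumption, using the Hugging Face Inference API (model name, endpoint, and token handling are placeholders, not the Space's actual implementation):

import requests

def generate_image(prompt: str, hf_token: str):
    # Hypothetical backend call; the real generate_image in app.py is not shown in this diff.
    resp = requests.post(
        "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
        headers={"Authorization": f"Bearer {hf_token}"},
        json={"inputs": prompt},
        timeout=120,
    )
    if resp.status_code != 200:
        # Mirrors the else branch above: a plain string is shown as the error message.
        return f"Error generating image: {resp.status_code}"
    # On success the response body is the raw image bytes.
    return resp.content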