Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -169,7 +169,7 @@ class ExtendedQuantumDocumentManager(QuantumDocumentManager):
|
|
169 |
emb = clip_model.get_image_features(**inputs)
|
170 |
embeddings.append(emb.numpy())
|
171 |
valid_images.append(img_path)
|
172 |
-
except FileNotFoundError
|
173 |
logger.warning(f"Image file not found: {img_path}. Skipping this file.")
|
174 |
except Exception as e:
|
175 |
logger.exception(f"Error processing image {img_path}: {str(e)}")
|
@@ -430,7 +430,7 @@ class MultiModalRetriever:
|
|
430 |
def _retrieve_images(self, query: str) -> List[str]:
|
431 |
inputs = self.clip_processor(text=query, return_tensors="pt")
|
432 |
with torch.no_grad():
|
433 |
-
|
434 |
return ["image_result_1.png", "image_result_2.png"]
|
435 |
|
436 |
def _retrieve_code(self, query: str) -> List[str]:
|
@@ -628,6 +628,33 @@ class ResearchWorkflow:
|
|
628 |
"context": {"error": True},
|
629 |
"metadata": {"status": "error"}
|
630 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
631 |
|
632 |
# ------------------------------
|
633 |
# Enhanced Research Interface
|
|
|
169 |
emb = clip_model.get_image_features(**inputs)
|
170 |
embeddings.append(emb.numpy())
|
171 |
valid_images.append(img_path)
|
172 |
+
except FileNotFoundError:
|
173 |
logger.warning(f"Image file not found: {img_path}. Skipping this file.")
|
174 |
except Exception as e:
|
175 |
logger.exception(f"Error processing image {img_path}: {str(e)}")
|
|
|
430 |
def _retrieve_images(self, query: str) -> List[str]:
    """
    Retrieve image results for a text query.

    Encodes the query with the CLIP processor and runs it through the
    CLIP text encoder (result currently discarded), then returns a
    placeholder result list.

    NOTE(review): this is a stub — the computed text features are not
    used for ranking yet; the returned filenames are hard-coded.
    """
    encoded = self.clip_processor(text=query, return_tensors="pt")
    # Inference only: no gradients needed for the text encoding pass.
    with torch.no_grad():
        _ = self.clip_model.get_text_features(**encoded)
    return ["image_result_1.png", "image_result_2.png"]
|
435 |
|
436 |
def _retrieve_code(self, query: str) -> List[str]:
|
|
|
628 |
"context": {"error": True},
|
629 |
"metadata": {"status": "error"}
|
630 |
}
|
631 |
+
|
632 |
+
def enhance_analysis(self, state: AgentState) -> Dict:
    """
    Augment the latest analysis with multi-modal insights.

    Reads the most recent message content from ``state["messages"]`` and
    appends a "## Multi-Modal Insights" markdown section. If the context
    contains an ``"images"`` list, each entry is embedded as a markdown
    image reference under "### Visual Evidence"; if it contains a
    ``"code"`` list, the snippets are wrapped in a fenced python block
    under "### Code Artifacts".

    Args:
        state: Agent state providing ``messages`` and ``context``.

    Returns:
        Dict with the enhanced text wrapped in an ``AIMessage`` and the
        unchanged context, or the workflow error state on failure.
    """
    try:
        analysis = state["messages"][-1].content
        enhanced = f"{analysis}\n\n## Multi-Modal Insights\n"
        # Append image references if available
        if "images" in state["context"]:
            enhanced += "### Visual Evidence\n"
            for img in state["context"]["images"]:
                # Fix: the original appended only f"\n", leaving `img`
                # unused and the section empty — embed the image as a
                # markdown reference instead.
                enhanced += f"![Image]({img})\n"
        # Append code snippets if available
        if "code" in state["context"]:
            enhanced += "### Code Artifacts\n```python\n"
            for code in state["context"]["code"]:
                enhanced += f"{code}\n"
            enhanced += "```"
        return {
            "messages": [AIMessage(content=enhanced)],
            "context": state["context"]
        }
    except Exception as e:
        logger.exception("Error during multi-modal enhancement.")
        return self._error_state(f"Enhancement Error: {str(e)}")
|
658 |
|
659 |
# ------------------------------
|
660 |
# Enhanced Research Interface
|