Create workflow.py
workflow.py +214 -0
workflow.py
ADDED
@@ -0,0 +1,214 @@
# workflow.py

import time
from datetime import datetime
from typing import Dict

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages

from processor import EnhancedCognitiveProcessor
from config import ResearchConfig

import logging
logger = logging.getLogger(__name__)

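# --- State schema (assumed) ---------------------------------------------------
# StateGraph needs a state schema to compile. The shape below is a minimal
# sketch inferred from the keys the nodes read and write ("messages",
# "context", "metadata"); adjust it if the intended schema differs.
from typing import Annotated, Any, List
from typing_extensions import TypedDict


class ResearchState(TypedDict, total=False):
    # Conversation history; the add_messages reducer appends instead of overwriting.
    messages: Annotated[List[Any], add_messages]
    # Shared scratch space between nodes: raw_query, domain, documents, refine_count, ...
    context: Dict[str, Any]
    # Run-level metadata such as timestamps and error status.
    metadata: Dict[str, Any]
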
class ResearchWorkflow:
    """
    Defines a multi-step research workflow using a state graph.
    """
    def __init__(self) -> None:
        self.processor = EnhancedCognitiveProcessor()
        # StateGraph requires a state schema; ResearchState (defined above) is used here.
        self.workflow = StateGraph(ResearchState)
        self._build_workflow()
        self.app = self.workflow.compile()

    def _build_workflow(self) -> None:
        self.workflow.add_node("ingest", self.ingest_query)
        self.workflow.add_node("retrieve", self.retrieve_documents)
        self.workflow.add_node("analyze", self.analyze_content)
        self.workflow.add_node("validate", self.validate_output)
        self.workflow.add_node("refine", self.refine_results)
        self.workflow.set_entry_point("ingest")
        self.workflow.add_edge("ingest", "retrieve")
        self.workflow.add_edge("retrieve", "analyze")
        self.workflow.add_conditional_edges(
            "analyze",
            self._quality_check,
            {"valid": "validate", "invalid": "refine"}
        )
        self.workflow.add_edge("refine", "retrieve")
        # Extended node for multi-modal enhancement: validated output flows
        # through "enhance", which is the terminal step of the graph.
        self.workflow.add_node("enhance", self.enhance_analysis)
        self.workflow.add_edge("validate", "enhance")
        self.workflow.add_edge("enhance", END)

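    # Resulting topology (for reference):
    #
    #   ingest -> retrieve -> analyze --(valid)---> validate -> enhance -> END
    #                                 \-(invalid)-> refine ---> retrieve  (loop)
    #
    # _quality_check caps the refine loop at three passes by forcing "valid".
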
    def ingest_query(self, state: Dict) -> Dict:
        try:
            query = state["messages"][-1].content
            # Retrieve the domain from the state's context (defaulting to Biomedical Research).
            domain = state.get("context", {}).get("domain", "Biomedical Research")
            new_context = {"raw_query": query, "domain": domain, "refine_count": 0, "refinement_history": []}
            logger.info(f"Query ingested. Domain: {domain}")
            return {
                "messages": [AIMessage(content="Query ingested successfully")],
                "context": new_context,
                "metadata": {"timestamp": datetime.now().isoformat()}
            }
        except Exception as e:
            logger.exception("Error during query ingestion.")
            return self._error_state(f"Ingestion Error: {str(e)}")

    def retrieve_documents(self, state: Dict) -> Dict:
        try:
            query = state["context"]["raw_query"]
            # For demonstration, we use an empty document list.
            # Replace this with actual retrieval logic as needed.
            docs = []
            logger.info(f"Retrieved {len(docs)} documents for query.")
            return {
                "messages": [AIMessage(content=f"Retrieved {len(docs)} documents")],
                "context": {
                    "raw_query": query,  # preserved so the refine -> retrieve loop can re-read it
                    "documents": docs,
                    "retrieval_time": time.time(),
                    "refine_count": state["context"].get("refine_count", 0),
                    "refinement_history": state["context"].get("refinement_history", []),
                    "domain": state["context"].get("domain", "Biomedical Research")
                }
            }
        except Exception as e:
            logger.exception("Error during document retrieval.")
            return self._error_state(f"Retrieval Error: {str(e)}")

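    # A possible shape for real retrieval (a sketch, assuming a LangChain retriever
    # such as a vector-store retriever is attached to the class; `self.retriever`
    # is hypothetical and not defined in this file):
    #
    #     docs = self.retriever.invoke(query)
    #
    # Each returned Document exposes `page_content`, which analyze_content joins below.
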
    def analyze_content(self, state: Dict) -> Dict:
        try:
            domain = state["context"].get("domain", "Biomedical Research").strip().lower()
            fallback_analyses = ResearchConfig.DOMAIN_FALLBACKS
            if domain in fallback_analyses:
                logger.info(f"Using fallback analysis for domain: {state['context'].get('domain')}")
                return {
                    "messages": [AIMessage(content=fallback_analyses[domain].strip())],
                    "context": state["context"]
                }
            else:
                docs = state["context"].get("documents", [])
                docs_text = "\n\n".join([d.page_content for d in docs])
                domain_prompt = ResearchConfig.DOMAIN_PROMPTS.get(domain, "")
                full_prompt = f"{domain_prompt}\n\n" + ResearchConfig.ANALYSIS_TEMPLATE.format(context=docs_text)
                response = self.processor.process_query(full_prompt)
                if "error" in response:
                    logger.error("Backend response error during analysis.")
                    return self._error_state(response["error"])
                logger.info("Content analysis completed.")
                return {
                    "messages": [AIMessage(content=response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
                    "context": state["context"]
                }
        except Exception as e:
            logger.exception("Error during content analysis.")
            return self._error_state(f"Analysis Error: {str(e)}")

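    # The processor is assumed to return an OpenAI-style chat-completion payload,
    # which the nodes unpack defensively, e.g.:
    #
    #     {"choices": [{"message": {"content": "..."}}]}
    #
    # or {"error": "..."} when the backend call fails.
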
    def validate_output(self, state: Dict) -> Dict:
        try:
            analysis = state["messages"][-1].content
            validation_prompt = (
                f"Validate the following research analysis:\n{analysis}\n\n"
                "Check for:\n"
                "1. Technical accuracy\n"
                "2. Citation support (are claims backed by evidence?)\n"
                "3. Logical consistency\n"
                "4. Methodological soundness\n\n"
                "Respond with 'VALID: [brief justification]' or 'INVALID: [brief justification]'."
            )
            response = self.processor.process_query(validation_prompt)
            logger.info("Output validation completed.")
            return {
                "messages": [AIMessage(content=analysis + f"\n\nValidation: {response.get('choices', [{}])[0].get('message', {}).get('content', '')}")]
            }
        except Exception as e:
            logger.exception("Error during output validation.")
            return self._error_state(f"Validation Error: {str(e)}")

    def refine_results(self, state: Dict) -> Dict:
        try:
            current_count = state["context"].get("refine_count", 0)
            state["context"]["refine_count"] = current_count + 1
            refinement_history = state["context"].setdefault("refinement_history", [])
            current_analysis = state["messages"][-1].content
            refinement_history.append(current_analysis)
            difficulty_level = max(0, 3 - state["context"]["refine_count"])
            logger.info(f"Refinement iteration: {state['context']['refine_count']}, Difficulty level: {difficulty_level}")

            if state["context"]["refine_count"] >= 3:
                meta_prompt = (
                    "You are given the following series of refinement outputs:\n" +
                    "\n---\n".join(refinement_history) +
                    "\n\nSynthesize the above into a final, concise, and high-quality technical analysis report. "
                    "Focus on the key findings and improvements made across the iterations. Do not introduce new ideas; just synthesize the improvements. Ensure the report is well-structured and easy to understand."
                )
                meta_response = self.processor.process_query(meta_prompt)
                logger.info("Meta-refinement completed.")
                return {
                    "messages": [AIMessage(content=meta_response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
                    "context": state["context"]
                }
            else:
                refinement_prompt = (
                    f"Refine this analysis (current difficulty level: {difficulty_level}):\n{current_analysis}\n\n"
                    "First, critically evaluate the analysis and identify its weaknesses, such as inaccuracies, unsupported claims, or lack of clarity. Summarize these weaknesses in a short paragraph.\n\n"
                    "Then, improve the following aspects:\n"
                    "1. Technical precision\n"
                    "2. Empirical grounding\n"
                    "3. Theoretical coherence\n\n"
                    "Use a structured difficulty gradient approach (similar to LADDER) to produce a simpler yet more accurate variant, addressing the weaknesses identified."
                )
                response = self.processor.process_query(refinement_prompt)
                logger.info("Refinement completed.")
                return {
                    "messages": [AIMessage(content=response.get('choices', [{}])[0].get('message', {}).get('content', ''))],
                    "context": state["context"]
                }
        except Exception as e:
            logger.exception("Error during refinement.")
            return self._error_state(f"Refinement Error: {str(e)}")

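    # Refinement loop: when _quality_check routes "invalid", refine_results rewrites
    # the analysis and the graph re-enters "retrieve". On the third pass the branch
    # above synthesizes the accumulated refinement_history into a final report.
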
    def _quality_check(self, state: Dict) -> str:
        refine_count = state["context"].get("refine_count", 0)
        if refine_count >= 3:
            logger.warning("Refinement limit reached. Forcing valid outcome.")
            return "valid"
        content = state["messages"][-1].content
        # Check "INVALID" first: "INVALID" also contains the substring "VALID".
        quality = "invalid" if "INVALID" in content else ("valid" if "VALID" in content else "invalid")
        logger.info(f"Quality check returned: {quality}")
        return quality

+
def _error_state(self, message: str) -> Dict:
|
188 |
+
logger.error(message)
|
189 |
+
return {
|
190 |
+
"messages": [{"content": f"❌ {message}"}],
|
191 |
+
"context": {"error": True},
|
192 |
+
"metadata": {"status": "error"}
|
193 |
+
}
|
194 |
+
|
    def enhance_analysis(self, state: Dict) -> Dict:
        try:
            analysis = state["messages"][-1].content
            enhanced = f"{analysis}\n\n## Multi-Modal Insights\n"
            if "images" in state["context"]:
                enhanced += "### Visual Evidence\n"
                for img in state["context"]["images"]:
                    # Assumed intent: embed each image reference as a Markdown image.
                    enhanced += f"![Visual evidence]({img})\n"
            if "code" in state["context"]:
                enhanced += "### Code Artifacts\n```python\n"
                for code in state["context"]["code"]:
                    enhanced += f"{code}\n"
                enhanced += "```"
            return {
                "messages": [AIMessage(content=enhanced)],
                "context": state["context"]
            }
        except Exception as e:
            logger.exception("Error during multi-modal enhancement.")
            return self._error_state(f"Enhancement Error: {str(e)}")
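
# --- Usage sketch (illustrative) -----------------------------------------------
# A minimal example of driving the compiled graph. The query text and domain are
# placeholders, and it assumes the processor backend and ResearchConfig are
# available in the environment.
if __name__ == "__main__":
    workflow = ResearchWorkflow()
    result = workflow.app.invoke({
        "messages": [HumanMessage(content="Summarize recent work on CRISPR delivery methods")],
        "context": {"domain": "Biomedical Research"},
        "metadata": {}
    })
    print(result["messages"][-1].content)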