mgbam committed (verified)
Commit ab8d4bc · 1 Parent(s): 801f405

Update app.py

Files changed (1): app.py (+93, -52)
app.py CHANGED
@@ -1,5 +1,5 @@
 # ------------------------------
-# UniversalResearch AI System with Refinement Counter and DEEPSEEK_API_KEY
+# UniversalResearch AI System + LADDER (Tufa Labs)
 # ------------------------------
 import logging
 import os
@@ -27,7 +27,7 @@ from langgraph.graph.message import add_messages
 from typing_extensions import TypedDict, Annotated
 from langchain.tools.retriever import create_retriever_tool
 
-# Increase Python's recursion limit at the very start (if needed)
+# Increase Python's recursion limit if needed
 sys.setrecursionlimit(10000)
 
 # ------------------------------
@@ -43,6 +43,11 @@ logger = logging.getLogger(__name__)
 # State Schema Definition
 # ------------------------------
 class AgentState(TypedDict):
+    """
+    Stores the messages and context for each step in the workflow.
+    'messages' contain the conversation so far,
+    'context' can hold domain-specific data, 'metadata' for additional info.
+    """
     messages: Annotated[Sequence[AIMessage | HumanMessage | ToolMessage], add_messages]
     context: Dict[str, Any]
     metadata: Dict[str, Any]
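For readers unfamiliar with LangGraph state schemas: the `add_messages` annotation marks `messages` as an append-reducer, so each node's partial return is merged into the running conversation rather than replacing it. A minimal sketch (not part of this commit; the plain `context`/`metadata` fields are last-write-wins by default):

```python
from typing import Any, Dict, Sequence
from typing_extensions import TypedDict, Annotated
from langchain_core.messages import AIMessage, BaseMessage
from langgraph.graph.message import add_messages

class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]  # appended, not replaced
    context: Dict[str, Any]   # plain fields are overwritten on each update
    metadata: Dict[str, Any]

# A node returning this partial update extends `messages` by one entry:
update = {"messages": [AIMessage(content="Draft analysis...")]}
```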
@@ -52,28 +57,37 @@ class AgentState(TypedDict):
 # ------------------------------
 class ResearchConfig:
     """
-    Generic configuration for the UniversalResearch AI System.
-    Make sure to set DEEPSEEK_API_KEY in your environment or HF Space secrets.
+    Universal configuration for the research system, referencing Tufa Labs' LADDER approach.
+
+    Make sure to set DEEPSEEK_API_KEY in your environment or HF Space secrets
+    to enable the external LLM calls.
     """
-    DEEPSEEK_API_KEY = os.environ.get("DEEPSEEK_API_KEY")  # Updated to reference DEEPSEEK_API_KEY
+    DEEPSEEK_API_KEY = os.environ.get("DEEPSEEK_API_KEY")
     CHROMA_PATH = "chroma_db"
     CHUNK_SIZE = 512
     CHUNK_OVERLAP = 64
     MAX_CONCURRENT_REQUESTS = 5
     EMBEDDING_DIMENSIONS = 1536
+
+    # Example map for featured research docs
     DOCUMENT_MAP = {
         "Sample Research Document 1": "Topic A Overview",
         "Sample Research Document 2": "Topic B Analysis",
         "Sample Research Document 3": "Topic C Innovations"
     }
+
+    # Template referencing a general approach for analyzing research documents
     ANALYSIS_TEMPLATE = (
         "Analyze the following research documents with scientific rigor:\n{context}\n\n"
-        "Provide your analysis with the following structure:\n"
-        "1. Key Contributions (bullet points)\n"
-        "2. Novel Methodologies\n"
-        "3. Empirical Results (with metrics)\n"
-        "4. Potential Applications\n"
-        "5. Limitations & Future Directions\n\n"
+        "Using the LADDER approach from Tufa Labs, the model should:\n"
+        "1. Break down the problem into simpler subproblems.\n"
+        "2. Iteratively refine the solution.\n"
+        "3. Provide thorough analysis, including:\n"
+        "   a. Key Contributions\n"
+        "   b. Novel Methodologies\n"
+        "   c. Empirical Results (with metrics)\n"
+        "   d. Potential Applications\n"
+        "   e. Limitations & Future Directions\n\n"
         "Format your response in Markdown with LaTeX mathematical notation where applicable."
     )
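The template is an ordinary `str.format` target with a single `{context}` slot, so filling it before the LLM call looks roughly like this (illustrative document values, not taken from the commit):

```python
docs = [
    "Research Report: Novel AI Techniques in Renewable Energy",
    "Research Report: Novel AI Techniques in Healthcare",  # hypothetical second doc
]
prompt = ResearchConfig.ANALYSIS_TEMPLATE.format(context="\n\n".join(docs))
```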
 
@@ -93,7 +107,7 @@ if not ResearchConfig.DEEPSEEK_API_KEY:
 class UniversalDocumentManager:
     """
     Manages creation of document collections for any research domain.
-    Documents are split into chunks and embedded using OpenAI embeddings.
+    Uses OpenAI embeddings for vector-based semantic search.
     """
     def __init__(self) -> None:
         try:
@@ -110,7 +124,7 @@ class UniversalDocumentManager:
 
     def create_collection(self, documents: List[str], collection_name: str) -> Chroma:
         """
-        Splits documents into chunks and stores them in a Chroma collection.
+        Splits documents into manageable chunks and stores them in a Chroma collection.
         """
         splitter = RecursiveCharacterTextSplitter(
             chunk_size=ResearchConfig.CHUNK_SIZE,
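A standalone sketch of the splitter configured with the values above (CHUNK_SIZE=512, CHUNK_OVERLAP=64); the overlap preserves context across chunk boundaries:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64)
long_report_text = "Background. Methods. Results. " * 100  # stand-in document
chunks = splitter.split_text(long_report_text)
# Each chunk is at most ~512 characters; adjacent chunks share up to 64.
```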
@@ -134,11 +148,11 @@ class UniversalDocumentManager:
 
     def _document_id(self, content: str) -> str:
         """
-        Generates a unique document ID using a SHA256 hash combined with the current timestamp.
+        Generates a unique document ID using SHA256 + timestamp.
         """
         return f"{hashlib.sha256(content.encode()).hexdigest()[:16]}-{int(time.time())}"
 
-# Initialize document collections for multiple research domains
+# Create example collections (can be replaced with domain-specific docs)
 udm = UniversalDocumentManager()
 research_docs = udm.create_collection([
     "Research Report: Novel AI Techniques in Renewable Energy",
@@ -157,8 +171,8 @@ development_docs = udm.create_collection([
 # ------------------------------
 class ResearchRetriever:
     """
-    Provides retrieval methods for research documents.
-    This class supports multiple domains, such as academic research and development.
+    Provides retrieval methods for multiple domains (research, development, etc.).
+    Uses MMR (Maximal Marginal Relevance) or similarity-based retrieval from Chroma.
     """
     def __init__(self) -> None:
         try:
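The MMR mode mentioned in the docstring trades pure similarity for diversity among results. A hedged sketch of how such a retriever is typically built from a Chroma store (the `k`/`fetch_k` values are illustrative, not taken from this commit):

```python
mmr_retriever = research_docs.as_retriever(
    search_type="mmr",                      # Maximal Marginal Relevance
    search_kwargs={"k": 4, "fetch_k": 20},  # rerank 20 candidates down to 4
)
hits = mmr_retriever.invoke("novel AI techniques in renewable energy")
```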
@@ -178,6 +192,7 @@ class ResearchRetriever:
     def retrieve(self, query: str, domain: str) -> List[Any]:
         """
         Retrieves documents for a given query and domain.
+        Defaults to 'research' if domain is unrecognized.
         """
         try:
             if domain == "research":
@@ -198,8 +213,8 @@ retriever = ResearchRetriever()
 # ------------------------------
 class CognitiveProcessor:
     """
-    Executes API requests to the DeepSeek backend using redundant parallel requests.
-    The responses are consolidated via a consensus mechanism.
+    Sends parallel requests to the DeepSeek API to reduce failures.
+    Implements a consensus mechanism to pick the most comprehensive response.
     """
     def __init__(self) -> None:
         self.executor = ThreadPoolExecutor(max_workers=ResearchConfig.MAX_CONCURRENT_REQUESTS)
@@ -207,10 +222,10 @@ class CognitiveProcessor:
 
     def process_query(self, prompt: str) -> Dict:
         """
-        Processes a query by sending multiple API requests in parallel.
+        Processes a query by sending multiple parallel requests (triple redundancy).
         """
         futures = []
-        for _ in range(3):  # Triple redundancy for improved reliability
+        for _ in range(3):
             futures.append(self.executor.submit(self._execute_api_request, prompt))
 
         results = []
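The redundancy pattern itself is plain `concurrent.futures`; here is a self-contained sketch with a stub in place of the DeepSeek call:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def stub_request(prompt: str) -> dict:  # stands in for _execute_api_request
    return {"choices": [{"message": {"content": f"answer: {prompt}"}}]}

with ThreadPoolExecutor(max_workers=5) as executor:
    futures = [executor.submit(stub_request, "query") for _ in range(3)]
    results = [f.result() for f in as_completed(futures)]
```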
@@ -225,7 +240,7 @@ class CognitiveProcessor:
 
     def _execute_api_request(self, prompt: str) -> Dict:
         """
-        Executes a single API request to the DeepSeek endpoint.
+        Executes a single request to the DeepSeek endpoint.
         """
         headers = {
             "Authorization": f"Bearer {ResearchConfig.DEEPSEEK_API_KEY}",
@@ -258,7 +273,7 @@ class CognitiveProcessor:
 
     def _consensus_check(self, results: List[Dict]) -> Dict:
         """
-        Consolidates multiple API responses by selecting the one with the most content.
+        Chooses the best response by comparing the length of the message content.
         """
         valid_results = [r for r in results if "error" not in r]
         if not valid_results:
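The consensus rule reduces to "longest non-error answer wins", as the `max(...)` line in the next hunk shows. A toy illustration:

```python
responses = [
    {"choices": [{"message": {"content": "short answer"}}]},
    {"error": "timeout"},
    {"choices": [{"message": {"content": "a longer, more detailed answer"}}]},
]
valid = [r for r in responses if "error" not in r]
best = max(valid, key=lambda r: len(r.get("choices", [{}])[0].get("message", {}).get("content", "")))
assert best["choices"][0]["message"]["content"].startswith("a longer")
```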
@@ -267,12 +282,19 @@ class CognitiveProcessor:
         return max(valid_results, key=lambda x: len(x.get('choices', [{}])[0].get('message', {}).get('content', '')))
 
 # ------------------------------
-# Research Workflow Engine
+# Research Workflow Engine (LADDER Integration)
 # ------------------------------
 class ResearchWorkflow:
     """
-    Defines a multi-step research workflow using a state graph.
-    This workflow is designed to be domain-agnostic, working for any research area.
+    Defines a multi-step workflow using LangGraph with Tufa Labs' LADDER approach:
+      1. Ingest Query
+      2. Retrieve Documents
+      3. Analyze Content
+      4. Validate Output
+      5. Refine (Recursive Self-Learning + TTRL)
+
+    The 'refine_results' node applies LADDER’s idea of iteratively
+    breaking down problems and re-solving them with no external data.
     """
     def __init__(self) -> None:
         self.processor = CognitiveProcessor()
@@ -287,7 +309,8 @@ class ResearchWorkflow:
         self.workflow.add_node("analyze", self.analyze_content)
         self.workflow.add_node("validate", self.validate_output)
         self.workflow.add_node("refine", self.refine_results)
-        # Set entry point and define transitions
+
+        # Entry point and transitions
         self.workflow.set_entry_point("ingest")
         self.workflow.add_edge("ingest", "retrieve")
         self.workflow.add_edge("retrieve", "analyze")
@@ -301,7 +324,7 @@ class ResearchWorkflow:
 
     def ingest_query(self, state: AgentState) -> Dict:
         """
-        Ingests the research query and initializes the refinement counter.
+        Ingests the research query and initializes the LADDER-based refinement counter.
         """
         try:
             query = state["messages"][-1].content
@@ -317,7 +340,8 @@ class ResearchWorkflow:
 
     def retrieve_documents(self, state: AgentState) -> Dict:
         """
-        Retrieves research documents for the given query.
+        Retrieves relevant documents based on the query.
+        The system can handle any domain (math, code generation, theorem proving, etc.).
         """
         try:
             query = state["context"]["raw_query"]
@@ -336,7 +360,10 @@ class ResearchWorkflow:
 
     def analyze_content(self, state: AgentState) -> Dict:
         """
-        Analyzes the retrieved research documents using the DeepSeek API.
+        Analyzes the retrieved documents using Tufa Labs' LADDER principles:
+          - Break down the documents,
+          - Provide structured analysis,
+          - Return a refined solution.
         """
         try:
             docs = state["context"].get("documents", [])
@@ -362,12 +389,13 @@ class ResearchWorkflow:
 
     def validate_output(self, state: AgentState) -> Dict:
         """
-        Validates the analysis report for technical accuracy and consistency.
+        Validates the analysis. If invalid, the system can refine the solution
+        (potentially multiple times) using LADDER’s iterative approach.
         """
         analysis = state["messages"][-1].content
         validation_prompt = (
-            f"Validate the following research analysis:\n{analysis}\n\n"
-            "Check for:\n1. Technical accuracy\n2. Adequate citation support\n3. Logical consistency\n4. Methodological soundness\n\n"
+            f"Validate this analysis:\n{analysis}\n\n"
+            "Check for:\n1. Technical accuracy\n2. Citation support\n3. Logical consistency\n4. Methodological soundness\n\n"
             "Respond with 'VALID' or 'INVALID'."
         )
         response = self.processor.process_query(validation_prompt)
@@ -382,15 +410,22 @@ class ResearchWorkflow:
 
     def refine_results(self, state: AgentState) -> Dict:
         """
-        Refines the analysis report if validation fails.
-        Increments the refinement counter to avoid infinite loops.
+        Applies Tufa Labs' LADDER principle:
+          - Recursively break down the problem,
+          - Re-solve with no external data,
+          - Potentially leverage TTRL for dynamic updates.
+
+        This method increments a refinement counter to avoid infinite recursion.
         """
         current_count = state["context"].get("refine_count", 0)
         state["context"]["refine_count"] = current_count + 1
-        logger.info(f"Refinement iteration: {state['context']['refine_count']}")
+        logger.info(f"LADDER refinement iteration: {state['context']['refine_count']}")
+
         refinement_prompt = (
-            f"Refine this analysis:\n{state['messages'][-1].content}\n\n"
-            "Improve by enhancing technical precision, empirical grounding, and theoretical coherence."
+            f"Refine this analysis using LADDER's self-improvement approach:\n"
+            f"{state['messages'][-1].content}\n\n"
+            "Focus on breaking down complex points further and re-solving them.\n"
+            "Enhance technical precision, empirical grounding, and theoretical coherence."
         )
         response = self.processor.process_query(refinement_prompt)
         logger.info("Refinement completed.")
@@ -405,21 +440,19 @@ class ResearchWorkflow:
 
     def _quality_check(self, state: AgentState) -> str:
         """
-        Checks whether the analysis report is valid.
-        Forces a valid state if the refinement counter exceeds a preset threshold.
+        Determines if the analysis is 'valid' or 'invalid'.
+        If refine_count exceeds 3, forcibly accept the result to prevent infinite loops.
         """
         refine_count = state["context"].get("refine_count", 0)
         if refine_count >= 3:
             logger.warning("Refinement limit reached. Forcing valid outcome to prevent infinite recursion.")
             return "valid"
         content = state["messages"][-1].content
-        quality = "valid" if "VALID" in content else "invalid"
-        logger.info(f"Quality check returned: {quality}")
-        return quality
+        return "valid" if "VALID" in content else "invalid"
 
     def _error_state(self, message: str) -> Dict:
         """
-        Returns a standardized error state.
+        Returns an error state if any node fails.
         """
         logger.error(message)
         return {
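Two things are worth noting in `_quality_check`: the hard cap of three refinements, and the fact that the substring test `"VALID" in content` also matches "INVALID", so an explicitly negative verdict still routes to "valid". A distilled illustration:

```python
def quality_check(refine_count: int, content: str) -> str:
    if refine_count >= 3:
        return "valid"  # forced acceptance stops infinite refine loops
    return "valid" if "VALID" in content else "invalid"

assert quality_check(3, "anything at all") == "valid"
assert quality_check(0, "INVALID") == "valid"  # pitfall: "INVALID" contains "VALID"
# A stricter test such as content.strip().startswith("VALID") would avoid this.
```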
@@ -433,8 +466,8 @@ class ResearchWorkflow:
 # ------------------------------
 class ResearchInterface:
     """
-    Provides a Streamlit-based interface for executing the UniversalResearch AI workflow.
-    The interface is domain-agnostic, making it suitable for research in any field.
+    Provides a Streamlit-based interface for the UniversalResearch AI with LADDER.
+    The system is domain-agnostic, handling math, code generation, theorem proving, etc.
     """
     def __init__(self) -> None:
         self.workflow = ResearchWorkflow()
@@ -442,7 +475,7 @@ class ResearchInterface:
 
     def _initialize_interface(self) -> None:
         st.set_page_config(
-            page_title="UniversalResearch AI",
+            page_title="UniversalResearch AI (LADDER)",
            layout="wide",
            initial_sidebar_state="expanded"
        )
@@ -496,7 +529,7 @@ class ResearchInterface:
 
     def _build_sidebar(self) -> None:
         with st.sidebar:
-            st.title("🔍 Research Database")
+            st.title("🔍 Research Database (LADDER)")
             st.subheader("Featured Research Topics")
             for title, short in ResearchConfig.DOCUMENT_MAP.items():
                 with st.expander(short):
@@ -506,19 +539,23 @@ class ResearchInterface:
             st.metric("Embedding Dimensions", ResearchConfig.EMBEDDING_DIMENSIONS)
 
     def _build_main_interface(self) -> None:
-        st.title("🧠 UniversalResearch AI")
+        st.title("🧠 UniversalResearch AI with Tufa Labs’ LADDER")
+        st.write(
+            "Leverage the power of Tufa Labs' LADDER approach for recursive self-improvement. "
+            "No external data required—just a structured difficulty gradient and test-time reinforcement."
+        )
         query = st.text_area(
             "Research Query:",
             height=200,
-            placeholder="Enter a research question or topic from any domain..."
+            placeholder="Enter a research question, from math to code generation..."
         )
         if st.button("Execute Analysis", type="primary"):
             self._execute_analysis(query)
 
     def _execute_analysis(self, query: str) -> None:
         try:
-            with st.spinner("Initializing Universal Analysis..."):
-                # Invoke the workflow with an increased recursion limit configuration.
+            with st.spinner("Initializing LADDER-based Analysis..."):
+                # The recursion_limit config ensures we can handle multiple refine iterations
                 results = self.workflow.app.stream({
                     "messages": [HumanMessage(content=query)],
                     "context": {},
@@ -539,6 +576,9 @@ Potential issues:
         )
 
     def _render_event(self, event: Dict) -> None:
+        """
+        Renders each event in the Streamlit UI.
+        """
         if 'ingest' in event:
             with st.container():
                 st.success("✅ Query Ingested")
@@ -561,6 +601,7 @@ Potential issues:
         if "VALID" in content:
             st.success("✅ Validation Passed")
             with st.expander("View Validated Analysis", expanded=True):
+                # Remove "Validation: ..." for a cleaner final result
                 st.markdown(content.split("Validation:")[0])
         else:
             st.warning("⚠️ Validation Issues Detected")
 