Merge branch 'main' into Jeff
- app.py +3 -3
- src/generation.py +25 -25
- src/llm_clients.py +67 -6
app.py CHANGED

@@ -116,10 +116,10 @@ class OnCallAIInterface:
        """
        if not self.initialized:
            error_msg = f"❌ System not initialized: {self.initialization_error}"
-            return error_msg, error_msg, "{}"
+            return error_msg, error_msg, "{}"

        if not user_query or not user_query.strip():
-            return "Please enter a medical query to get started.", "", "{}"
+            return "Please enter a medical query to get started.", "", "{}"

        processing_start = datetime.now()
        processing_steps = []

@@ -142,7 +142,7 @@ class OnCallAIInterface:
        if condition_result.get('type') == 'invalid_query':
            non_medical_msg = condition_result.get('message', 'This appears to be a non-medical query.')
            processing_steps.append(" 🚫 Query identified as non-medical")
-            return non_medical_msg, '\n'.join(processing_steps), "{}"
+            return non_medical_msg, '\n'.join(processing_steps), "{}"

        # STEP 1.5: Hospital-Specific Customization (Early retrieval)
        # Run this early since it has its own keyword extraction
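All three guard clauses above return the same (advice, processing_steps, metadata_json) triple, so the UI layer can always unpack a complete result. Below is a minimal sketch of that contract, assuming a Gradio front end and a hypothetical process_query method name; neither is shown in this hunk.

# Sketch only: the three-output contract the guard clauses above preserve.
# `process_query` and the Gradio wiring are assumptions, not code from this commit.
import gradio as gr

from app import OnCallAIInterface  # import path assumed from the repo layout

oncall = OnCallAIInterface()

def answer(user_query: str):
    # Every path (error, empty query, non-medical, success) yields the same triple,
    # so the three output components below always receive a value.
    advice, steps, metadata_json = oncall.process_query(user_query)
    return advice, steps, metadata_json

demo = gr.Interface(
    fn=answer,
    inputs=gr.Textbox(label="Medical query"),
    outputs=[gr.Markdown(label="Advice"),
             gr.Textbox(label="Processing steps"),
             gr.JSON(label="Retrieval metadata")],
)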
src/generation.py CHANGED

@@ -36,8 +36,8 @@ FALLBACK_TIMEOUTS = {
}

FALLBACK_TOKEN_LIMITS = {
-    "primary":
-    "fallback_1":
+    "primary": 1200,      # Full comprehensive medical advice
+    "fallback_1": 600,    # Concise medical guidance
    "fallback_2": 0       # Template-based, no LLM tokens
}

@@ -364,9 +364,9 @@ class MedicalAdviceGenerator:
• Reference evidence from above sources
• Emphasize clinical judgment

-IMPORTANT: Keep response
+IMPORTANT: Keep response under 1000 words. Use concise numbered points. For complex cases with multiple conditions, address the most urgent condition first, then relevant comorbidities. Prioritize actionable clinical steps over theoretical explanations.

-Your response should
+Your response should provide practical clinical guidance suitable for immediate bedside application with appropriate medical caution."""

        return prompt

@@ -784,20 +784,20 @@ class MedicalAdviceGenerator:

        template = f"""Based on available medical guidelines for your query: "{user_query}"

-CLINICAL GUIDANCE:
-{formatted_context}
+CLINICAL GUIDANCE:
+{formatted_context}

-IMPORTANT CLINICAL NOTES:
-• This guidance is based on standard medical protocols and guidelines
-• Individual patient factors may require modifications to these recommendations
-• Consider patient-specific contraindications and comorbidities
-• Consult with senior physician or specialist for complex cases
-• Follow local institutional protocols and policies
+IMPORTANT CLINICAL NOTES:
+• This guidance is based on standard medical protocols and guidelines
+• Individual patient factors may require modifications to these recommendations
+• Consider patient-specific contraindications and comorbidities
+• Consult with senior physician or specialist for complex cases
+• Follow local institutional protocols and policies

-SYSTEM NOTE:
-This response was generated using medical guidelines only, without advanced clinical reasoning, due to technical limitations with the primary system. For complex cases requiring detailed clinical analysis, please consult directly with medical professionals.
+SYSTEM NOTE:
+This response was generated using medical guidelines only, without advanced clinical reasoning, due to technical limitations with the primary system. For complex cases requiring detailed clinical analysis, please consult directly with medical professionals.

-Please ensure appropriate clinical oversight and use professional medical judgment in applying these guidelines."""
+Please ensure appropriate clinical oversight and use professional medical judgment in applying these guidelines."""

        return template

@@ -813,19 +813,19 @@ Please ensure appropriate clinical oversight and use professional medical judgme
        """
        template = f"""Regarding your medical query: "{user_query}"

-SYSTEM STATUS:
-Due to technical difficulties with our medical guidance system, we cannot provide specific clinical recommendations at this time.
+SYSTEM STATUS:
+Due to technical difficulties with our medical guidance system, we cannot provide specific clinical recommendations at this time.

-RECOMMENDED ACTIONS:
-• Please consult with qualified healthcare providers for immediate clinical guidance
-• Contact your primary care physician or relevant specialist
-• For emergency situations, seek immediate medical attention
-• Consider consulting medical literature or clinical decision support tools
+RECOMMENDED ACTIONS:
+• Please consult with qualified healthcare providers for immediate clinical guidance
+• Contact your primary care physician or relevant specialist
+• For emergency situations, seek immediate medical attention
+• Consider consulting medical literature or clinical decision support tools

-IMPORTANT:
-This system experienced technical limitations that prevented access to our medical guideline database. Professional medical consultation is strongly recommended for this query.
+IMPORTANT:
+This system experienced technical limitations that prevented access to our medical guideline database. Professional medical consultation is strongly recommended for this query.

-Please try rephrasing your question or contact our technical support if the issue persists."""
+Please try rephrasing your question or contact our technical support if the issue persists."""

        return template
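The FALLBACK_TOKEN_LIMITS table above pairs each generation tier with a max-token budget, with zero meaning the template tier skips the LLM entirely. The sketch below shows one way such a table could be consumed; generate_with_fallbacks, call_llm, and build_template are illustrative assumptions, not code from this commit.

# Sketch only: one possible consumer of the FALLBACK_TOKEN_LIMITS table.
FALLBACK_TOKEN_LIMITS = {
    "primary": 1200,      # Full comprehensive medical advice
    "fallback_1": 600,    # Concise medical guidance
    "fallback_2": 0,      # Template-based, no LLM tokens
}

def generate_with_fallbacks(prompt: str, call_llm, build_template) -> str:
    """Try each tier in order; a zero token budget means 'skip the LLM and use the template'."""
    for level in ("primary", "fallback_1", "fallback_2"):
        max_tokens = FALLBACK_TOKEN_LIMITS[level]
        if max_tokens == 0:
            return build_template()          # template tier never touches the LLM
        try:
            return call_llm(prompt, max_tokens=max_tokens)
        except Exception:
            continue                         # fall through to the next, cheaper tier
    return build_template()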
src/llm_clients.py CHANGED

@@ -101,14 +101,23 @@ class llm_Med42_70BClient:
                    "role": "system",
                    "content": """You are a medical assistant trained to extract medical conditions.

-
-
-
-
-
-DO NOT provide medical advice."""
+HANDLING MULTIPLE CONDITIONS:
+1. If query contains multiple medical conditions, extract the PRIMARY/ACUTE condition
+2. Priority order: Life-threatening emergencies > Acute conditions > Chronic diseases > Symptoms
+3. For patient scenarios, focus on the condition requiring immediate medical attention
+
+EXAMPLES:
+- Single: "chest pain" → "Acute Coronary Syndrome"
+- Multiple: "diabetic patient with chest pain" → "Acute Coronary Syndrome"
+- Chronic+Acute: "hypertension patient having seizure" → "Seizure Disorder"
+- Complex: "20-year-old female, porphyria, sudden seizure" → "Acute Seizure"
+- Emergency context: "porphyria patient with sudden seizure" → "Seizure Disorder"
+
+RESPONSE FORMAT:
+- Medical queries: Return ONLY the primary condition name
+- Non-medical queries: Return "NON_MEDICAL_QUERY"
+
+DO NOT provide explanations or medical advice."""
                },
                {
                    "role": "user",

@@ -129,6 +138,17 @@ DO NOT provide medical advice."""
            self.logger.info(f"Raw LLM Response: {response_text}")
            self.logger.info(f"Query Latency: {latency:.4f} seconds")

+            # Detect abnormal response
+            if self._is_abnormal_response(response_text):
+                self.logger.error(f"❌ Abnormal LLM response detected: {response_text[:50]}...")
+                return {
+                    'extracted_condition': '',
+                    'confidence': '0',
+                    'error': 'Abnormal LLM response detected',
+                    'raw_response': response_text,
+                    'latency': latency
+                }
+
            # Extract condition from response
            extracted_condition = self._extract_condition(response_text)

@@ -265,6 +285,47 @@ Focus on: conditions, symptoms, procedures, body systems."""

        return response.split('\n')[0].strip() or ""

+    def _is_abnormal_response(self, response: str) -> bool:
+        """
+        Detect abnormal LLM responses (e.g., repetitive characters, short/long responses)
+
+        Args:
+            response: LLM response text
+
+        Returns:
+            bool: True if response is abnormal, False otherwise
+        """
+        if not response or not response.strip():
+            return True
+
+        response_stripped = response.strip()
+
+        # Detect repetitive characters (e.g., !!!!!!!)
+        if len(response_stripped) > 20:
+            unique_chars = len(set(response_stripped))
+            if unique_chars <= 3:  # Only a few characters
+                self.logger.warning(f"Detected repetitive character pattern: {response_stripped[:30]}...")
+                return True
+
+        # Detect special character patterns
+        abnormal_patterns = ['!!!!', '????', '****', '####', '----']
+        for pattern in abnormal_patterns:
+            if pattern in response_stripped:
+                self.logger.warning(f"Detected abnormal pattern '{pattern}' in response")
+                return True
+
+        # Detect short response (less than 2 characters)
+        if len(response_stripped) < 2:
+            return True
+
+        # Detect long response - allow some flexibility for detailed medical advice
+        # 750 words ≈ 1000-1200 chars, allow some flexibility to 2500 chars
+        if len(response_stripped) > 2500:  # Changed from 1000 to 2500
+            self.logger.warning(f"Response extremely long: {len(response_stripped)} chars")
+            return True
+
+        return False
+
    def _is_rejection_response(self, response: str) -> bool:
        """
        Dual-layer detection: prompt compliance + natural language patterns