walaa2022 committed
Commit cfb8694 · verified · 1 Parent(s): ce18b01

Update app.py

Files changed (1)
  1. app.py +411 -61
app.py CHANGED
@@ -1,41 +1,93 @@
  import gradio as gr
- import librosa
  import numpy as np
- import tensorflow as tf
  import matplotlib.pyplot as plt
  from datetime import datetime
  import json
- import os
  from PIL import Image
- import google.generativeai as genai
  from typing import Dict, List, Tuple, Optional

- import os
- os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

- # Configure Gemini AI
- # You'll need to set your API key: export GOOGLE_API_KEY="your_api_key_here"
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
- gemini_model = genai.GenerativeModel('gemini-1.5-flash')

- # Load the pre-trained ResNet model

  def load_heartbeat_model():
      try:
          model = tf.keras.models.load_model('Heart_ResNet.h5')
          return model
-     except:
-         print("Warning: Heart_ResNet.h5 model not found. Using mock predictions.")
          return None

- heartbeat_model = load_heartbeat_model()

  # Global storage for patient data (in production, use a proper database)
  patient_data = {}

  def process_audio(file_path: str) -> Tuple[np.ndarray, np.ndarray, int]:
      """Process audio file and extract MFCC features."""
      SAMPLE_RATE = 22050
      DURATION = 10
      input_length = int(SAMPLE_RATE * DURATION)
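The body of `process_audio` between this hunk and the next (the actual audio loading and MFCC extraction) is unchanged and therefore not shown in the diff. Purely as an illustration, a librosa pipeline consistent with the fixed 10-second window above and the `mfccs.reshape(1, 52, 1)` call further down might look like the sketch below; the function name and parameter choices here are assumptions, not code from this repository.

```python
# Hypothetical sketch only; the real process_audio body is elided from this diff.
import numpy as np
import librosa

def extract_mfcc_features(file_path: str, sample_rate: int = 22050, duration: int = 10):
    """Load audio, pad/trim to a fixed length, and return (52 mean MFCCs, waveform, sr)."""
    input_length = sample_rate * duration
    y, sr = librosa.load(file_path, sr=sample_rate, duration=duration)
    # Pad or trim so every clip has exactly input_length samples
    if len(y) < input_length:
        y = np.pad(y, (0, input_length - len(y)))
    else:
        y = y[:input_length]
    # 52 MFCCs per frame, averaged over time, giving a 52-value feature vector
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=52)
    return np.mean(mfcc, axis=1), y, sr
```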
@@ -54,31 +106,57 @@ def process_audio(file_path: str) -> Tuple[np.ndarray, np.ndarray, int]:
          print(f"Error processing audio: {e}")
          return None, None, None

- def analyze_heartbeat(audio_file) -> Tuple[Dict, str]:
      """Analyze heartbeat audio and return results with visualization."""
      if audio_file is None:
-         return {}, "No audio file provided"

      try:
          mfccs, waveform, sr = process_audio(audio_file)
          if mfccs is None:
-             return {}, "Error processing audio file"

-         if heartbeat_model is not None:
              features = mfccs.reshape(1, 52, 1)
-             preds = heartbeat_model.predict(features)
              class_names = ["artifact", "murmur", "normal"]
              results = {name: round(float(preds[0][i]) * 100, 1) for i, name in enumerate(class_names)}
          else:
              # Mock results for demonstration
-             results = {"artifact": 0.15, "murmur": 0.25, "normal": 0.60}

          # Create waveform visualization
          fig, ax = plt.subplots(figsize=(12, 4))
-         librosa.display.waveshow(waveform, sr=sr, ax=ax)
-         ax.set_title("Heartbeat Waveform Analysis")
          ax.set_xlabel("Time (seconds)")
          ax.set_ylabel("Amplitude")
          plt.tight_layout()

          # Save plot
@@ -86,50 +164,238 @@ def analyze_heartbeat(audio_file) -> Tuple[Dict, str]:
          plt.savefig(plot_path, dpi=150, bbox_inches='tight')
          plt.close()

-         return results, plot_path

      except Exception as e:
-         return {}, f"Error analyzing heartbeat: {str(e)}"

  def analyze_medical_image(image) -> str:
      """Analyze medical images using Gemini Vision."""
      if image is None:
          return "No image provided"

      try:
          # Convert to PIL Image if needed
          if not isinstance(image, Image.Image):
              image = Image.fromarray(image)

          prompt = """
- Analyze this medical image/investigation result. Please provide:
- 1. Type of investigation/scan
- 2. Key findings visible in the image
- 3. Any abnormalities or areas of concern
- 4. Recommendations for follow-up if needed

- Please be thorough but remember this is for educational purposes and should not replace professional medical diagnosis.
  """

          response = gemini_model.generate_content([prompt, image])
-         return response.text

      except Exception as e:
-         return f"Error analyzing image: {str(e)}"

  def generate_comprehensive_assessment(patient_info: Dict) -> str:
      """Generate comprehensive medical assessment using Gemini AI."""
      try:
-         # Prepare comprehensive prompt
          prompt = f"""
- Based on the following comprehensive patient data, provide a detailed medical assessment:

  PATIENT DEMOGRAPHICS:
  - Name: {patient_info.get('name', 'Not provided')}
- - Age: {patient_info.get('age', 'Not provided')}
  - Sex: {patient_info.get('sex', 'Not provided')}
  - Weight: {patient_info.get('weight', 'Not provided')} kg
  - Height: {patient_info.get('height', 'Not provided')} cm

  CHIEF COMPLAINT:
  {patient_info.get('complaint', 'Not provided')}
@@ -146,23 +412,62 @@ def generate_comprehensive_assessment(patient_info: Dict) -> str:
  INVESTIGATIONS:
  {patient_info.get('investigation_analysis', 'Not provided')}

- Please provide a comprehensive medical assessment including:
- 1. Clinical Summary
- 2. Differential Diagnosis (list possible conditions)
- 3. Risk Factors Assessment
- 4. Recommended Treatment Plan
- 5. Follow-up Recommendations
- 6. Patient Education Points
- 7. Prognosis

- Please structure your response professionally and remember this is for educational purposes.
  """

          response = gemini_model.generate_content(prompt)
-         return response.text

      except Exception as e:
-         return f"Error generating assessment: {str(e)}"

  def save_patient_data(name, age, sex, weight, height, complaint, medical_history,
                        examination, heartbeat_results, investigation_analysis):
@@ -220,7 +525,29 @@ def process_complete_consultation(name, age, sex, weight, height, complaint,

  # Create Gradio interface
  def create_interface():
-     with gr.Blocks(title="Comprehensive Medical Consultation System", theme=gr.themes.Soft()) as demo:

          gr.Markdown("""
  # 🏥 Comprehensive Medical Consultation System
@@ -312,8 +639,9 @@ def create_interface():

          assessment_output = gr.Textbox(
              label="AI-Generated Medical Assessment",
-             lines=15,
-             placeholder="Complete medical assessment will be generated here based on all provided information..."
          )

          # Hidden outputs to collect all data
@@ -386,15 +714,37 @@ def create_interface():

  # Launch the application
  if __name__ == "__main__":
-     # Check if required environment variables are set
-     if not os.getenv("GOOGLE_API_KEY"):
-         print("Warning: GOOGLE_API_KEY not set. Gemini AI features will not work.")
-         print("Set your API key with: export GOOGLE_API_KEY='your_api_key_here'")

-     demo = create_interface()
-     demo.launch(
-         server_name="0.0.0.0",
-         server_port=7860,
-         share=True,
-         debug=True
-     )
+
+ import os
+ os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
+ # Import with error handling
  import gradio as gr
  import numpy as np
  import matplotlib.pyplot as plt
  from datetime import datetime
  import json
  from PIL import Image
  from typing import Dict, List, Tuple, Optional
+ import warnings
+ warnings.filterwarnings('ignore')

+ # Optional imports with fallbacks
+ try:
+     import librosa
+     import librosa.display
+     LIBROSA_AVAILABLE = True
+     print("✅ Librosa loaded successfully")
+ except ImportError:
+     print("⚠️ Warning: librosa not available. Audio processing will be limited.")
+     LIBROSA_AVAILABLE = False

+ try:
+     import tensorflow as tf
+     # Suppress TF warnings
+     tf.get_logger().setLevel('ERROR')
+     TF_AVAILABLE = True
+     print("✅ TensorFlow loaded successfully")
+ except ImportError:
+     print("⚠️ Warning: TensorFlow not available. Using mock predictions.")
+     TF_AVAILABLE = False

+ try:
+     import google.generativeai as genai
+     GEMINI_AVAILABLE = True
+     print("✅ Google Generative AI loaded successfully")
+ except ImportError:
+     print("⚠️ Warning: google-generativeai not available. AI features will be limited.")
+     GEMINI_AVAILABLE = False
+
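All three optional imports added above follow the same try/except ImportError pattern: import, set an availability flag, print a status line. As a side note, and not as part of this commit, the pattern could be factored into a small helper; the name `try_import` and its messages below are illustrative assumptions.

```python
# Hypothetical refactor sketch, not in app.py: one helper instead of three
# near-identical try/except blocks. TensorFlow's logger tweak would still be
# done separately after a successful import.
import importlib

def try_import(module_name: str, warning: str):
    """Return (module, True) if module_name imports cleanly, else (None, False)."""
    try:
        module = importlib.import_module(module_name)
        print(f"✅ {module_name} loaded successfully")
        return module, True
    except ImportError:
        print(f"⚠️ Warning: {module_name} not available. {warning}")
        return None, False

# Usage mirroring the flags above:
# librosa, LIBROSA_AVAILABLE = try_import("librosa", "Audio processing will be limited.")
```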
+ # Configure Gemini AI with error handling
+ if GEMINI_AVAILABLE and os.getenv("GOOGLE_API_KEY"):
+     try:
+         genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+         gemini_model = genai.GenerativeModel('gemini-1.5-flash')
+     except Exception as e:
+         print(f"Warning: Failed to initialize Gemini: {e}")
+         GEMINI_AVAILABLE = False
+         gemini_model = None
+ else:
+     gemini_model = None

+ # Load the pre-trained ResNet model with error handling
  def load_heartbeat_model():
+     if not TF_AVAILABLE:
+         print("📋 TensorFlow not available - using mock predictions")
+         return None
+
      try:
          model = tf.keras.models.load_model('Heart_ResNet.h5')
+         print("🎯 Heart_ResNet.h5 model loaded successfully")
          return model
+     except Exception as e:
+         print(f"📋 Could not load Heart_ResNet.h5 model: {e}")
+         print("📋 Using mock predictions instead")
          return None

+ # Initialize model (removed @gr.utils.cache decorator)
+ heartbeat_model = None
+
+ def get_heartbeat_model():
+     """Get or load the heartbeat model (lazy loading)"""
+     global heartbeat_model
+     if heartbeat_model is None:
+         heartbeat_model = load_heartbeat_model()
+     return heartbeat_model

  # Global storage for patient data (in production, use a proper database)
  patient_data = {}

  def process_audio(file_path: str) -> Tuple[np.ndarray, np.ndarray, int]:
      """Process audio file and extract MFCC features."""
+     if not LIBROSA_AVAILABLE:
+         print("Librosa not available - cannot process audio")
+         return None, None, None
+
      SAMPLE_RATE = 22050
      DURATION = 10
      input_length = int(SAMPLE_RATE * DURATION)

          print(f"Error processing audio: {e}")
          return None, None, None

+ def analyze_heartbeat(audio_file) -> Tuple[str, str]:
      """Analyze heartbeat audio and return results with visualization."""
      if audio_file is None:
+         return "No audio file provided", None
+
+     if not LIBROSA_AVAILABLE:
+         return "Audio processing not available (librosa not installed)", None

      try:
          mfccs, waveform, sr = process_audio(audio_file)
          if mfccs is None:
+             return "Error processing audio file", None
+
+         # Get model lazily
+         model = get_heartbeat_model()

+         if model is not None and TF_AVAILABLE:
              features = mfccs.reshape(1, 52, 1)
+             preds = model.predict(features, verbose=0)  # Suppress prediction output
              class_names = ["artifact", "murmur", "normal"]
+             # Convert to percentages and round to nearest 0.1
              results = {name: round(float(preds[0][i]) * 100, 1) for i, name in enumerate(class_names)}
          else:
              # Mock results for demonstration
+             import random
+             random.seed(42)  # For consistent demo results
+             results = {
+                 "artifact": round(random.uniform(0.5, 2.5), 1),
+                 "murmur": round(random.uniform(1.5, 3.5), 1),
+                 "normal": round(random.uniform(94.0, 98.0), 1)
+             }
+             # Ensure they sum to 100.0
+             total = sum(results.values())
+             if total != 100.0:
+                 # Adjust the largest value to make sum exactly 100.0
+                 max_key = max(results, key=results.get)
+                 results[max_key] = round(results[max_key] + (100.0 - total), 1)

          # Create waveform visualization
          fig, ax = plt.subplots(figsize=(12, 4))
+         if LIBROSA_AVAILABLE:
+             librosa.display.waveshow(waveform, sr=sr, ax=ax)
+         else:
+             # Simple plot if librosa.display not available
+             time_axis = np.linspace(0, len(waveform)/sr, len(waveform))
+             ax.plot(time_axis, waveform)
+
+         ax.set_title("Heartbeat Waveform Analysis", fontsize=14, fontweight='bold')
          ax.set_xlabel("Time (seconds)")
          ax.set_ylabel("Amplitude")
+         ax.grid(True, alpha=0.3)
          plt.tight_layout()

          # Save plot

          plt.savefig(plot_path, dpi=150, bbox_inches='tight')
          plt.close()

+         # Determine primary classification
+         max_class = max(results, key=results.get)
+         confidence = results[max_class]
+
+         # Status and interpretation
+         status = "Model-based analysis" if model else "Demo mode (model not loaded)"
+
+         if max_class == "normal" and confidence >= 90.0:
+             interpretation = "✅ Normal heartbeat detected"
+         elif max_class == "murmur" and confidence >= 70.0:
+             interpretation = "⚠️ Heart murmur detected - recommend medical evaluation"
+         elif max_class == "artifact" and confidence >= 50.0:
+             interpretation = "🔊 Audio artifact detected - consider re-recording"
+         else:
+             interpretation = "❓ Inconclusive result - recommend professional evaluation"
+
+         # Format results as text
+         results_text = f"""🩺 HEART SOUNDS ANALYSIS RESULTS
+ {'='*45}
+
+ 📊 Classification Probabilities:
+ • Normal Heartbeat: {results['normal']}%
+ • Heart Murmur: {results['murmur']}%
+ • Audio Artifact: {results['artifact']}%
+
+ 🎯 Primary Classification: {max_class.upper()} ({confidence}%)
+ 🔍 Interpretation: {interpretation}
+
+ 📈 Analysis Status: {status}
+ 🔊 Audio Duration: {len(waveform)/sr:.1f} seconds
+ 📏 Sample Rate: {sr} Hz
+
+ ⚠️ Note: This analysis is for educational purposes only.
+ Always consult a qualified healthcare professional."""
+
+         return results_text, plot_path

      except Exception as e:
+         return f"Error analyzing heartbeat: {str(e)}", None

  def analyze_medical_image(image) -> str:
      """Analyze medical images using Gemini Vision."""
      if image is None:
          return "No image provided"

+     if not GEMINI_AVAILABLE or gemini_model is None:
+         return f"""🔬 MEDICAL IMAGE ANALYSIS
+ {'='*40}
+
+ ⚠️ AI Analysis Not Available
+ Gemini AI is not configured or installed.
+
+ 📋 MOCK ANALYSIS REPORT:
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ 🏥 Investigation Type: Medical Image/Scan
+ 📊 Image Quality: Acceptable for review
+ 🔍 General Findings: Image appears to show medical investigation
+
+ 📝 RECOMMENDATIONS:
+ • Ensure proper medical interpretation by qualified radiologist
+ • Correlate findings with clinical presentation
+ • Consider additional imaging if clinically indicated
+ • Follow institutional protocols for image review
+
+ ⚠️ IMPORTANT: This is a demonstration mode.
+ To enable full AI analysis:
+ 1. Install: pip install google-generativeai
+ 2. Set environment variable: GOOGLE_API_KEY
+ 3. Restart the application
+
+ 🩺 Always consult qualified healthcare professionals for medical interpretation."""
+
      try:
          # Convert to PIL Image if needed
          if not isinstance(image, Image.Image):
              image = Image.fromarray(image)

          prompt = """
+ As a medical AI assistant, analyze this medical image/investigation result. Please provide a structured report with:
+
+ 1. IMAGE TYPE & QUALITY:
+ - Type of investigation/scan
+ - Image quality assessment
+
+ 2. TECHNICAL PARAMETERS:
+ - Visible technical details
+ - Imaging modality characteristics

+ 3. ANATOMICAL STRUCTURES:
+ - Clearly visible structures
+ - Anatomical landmarks
+
+ 4. FINDINGS:
+ - Normal findings
+ - Any abnormalities or areas of concern
+ - Measurements if applicable
+
+ 5. CLINICAL CORRELATION:
+ - Significance of findings
+ - Recommendations for follow-up
+
+ 6. LIMITATIONS:
+ - Any limitations of the study
+ - Areas requiring further evaluation
+
+ Please format your response professionally and remember this is for educational purposes only. Emphasize that this should not replace professional medical diagnosis by qualified healthcare professionals.
  """

          response = gemini_model.generate_content([prompt, image])
+
+         formatted_response = f"""🔬 AI MEDICAL IMAGE ANALYSIS
+ {'='*45}
+
+ {response.text}
+
+ {'='*45}
+ ⚠️ DISCLAIMER: This AI analysis is for educational purposes only.
+ Always consult qualified healthcare professionals for definitive diagnosis."""
+
+         return formatted_response

      except Exception as e:
+         return f"🚨 Error analyzing image: {str(e)}\n\nPlease check your Gemini API configuration and try again."

  def generate_comprehensive_assessment(patient_info: Dict) -> str:
      """Generate comprehensive medical assessment using Gemini AI."""
+     if not GEMINI_AVAILABLE or gemini_model is None:
+         # Calculate BMI if height and weight available
+         bmi_info = ""
+         if patient_info.get('weight') and patient_info.get('height'):
+             try:
+                 weight = float(patient_info.get('weight'))
+                 height = float(patient_info.get('height')) / 100  # Convert cm to m
+                 bmi = weight / (height * height)
+                 bmi_info = f"BMI: {bmi:.1f} kg/m²"
+             except:
+                 bmi_info = "BMI: Unable to calculate"
+
+         return f"""# 🏥 COMPREHENSIVE MEDICAL ASSESSMENT
+
+ ## 👤 PATIENT DEMOGRAPHICS
+ **Name:** {patient_info.get('name', 'Not provided')}
+ **Age:** {patient_info.get('age', 'Not provided')} years
+ **Sex:** {patient_info.get('sex', 'Not provided')}
+ **Weight:** {patient_info.get('weight', 'Not provided')} kg
+ **Height:** {patient_info.get('height', 'Not provided')} cm
+ {bmi_info}
+
+ ---
+
+ ## 🩺 CLINICAL PRESENTATION
+
+ ### Chief Complaint
+ {patient_info.get('complaint', 'Not provided')}
+
+ ### Medical History
+ {patient_info.get('medical_history', 'Not provided')}
+
+ ### Physical Examination
+ {patient_info.get('examination', 'Not provided')}
+
+ ---
+
+ ## 🔬 DIAGNOSTIC RESULTS
+
+ ### Heart Sounds Analysis
+ {patient_info.get('heartbeat_analysis', 'Not performed')}
+
+ ### Investigations
+ {patient_info.get('investigation_analysis', 'Not provided')}
+
+ ---
+
+ ## ⚠️ SYSTEM STATUS
+ **AI-powered comprehensive assessment not available.**
+ Please install google-generativeai and configure GOOGLE_API_KEY for full AI features.
+
+ ---
+
+ ## 📋 BASIC RECOMMENDATIONS
+ 1. **Immediate:** Review all clinical findings with qualified healthcare professional
+ 2. **Assessment:** Correlate examination findings with investigation results
+ 3. **Follow-up:** Consider appropriate follow-up based on clinical presentation
+ 4. **Documentation:** Ensure proper documentation and patient safety protocols
+
+ ---
+
+ ## 🍎 GENERAL NUTRITION GUIDELINES
+ - **Hydration:** Maintain adequate fluid intake (8-10 glasses water/day)
+ - **Balanced Diet:** Include fruits, vegetables, whole grains, lean proteins
+ - **Heart Health:** Limit sodium, saturated fats, processed foods
+ - **Portion Control:** Maintain healthy portion sizes based on BMI
+
+ ---
+
+ **⚠️ DISCLAIMER:** This assessment is for educational purposes only. Always consult qualified healthcare professionals for medical decisions."""
+
      try:
+         # Calculate BMI if available
+         bmi_calculation = ""
+         if patient_info.get('weight') and patient_info.get('height'):
+             try:
+                 weight = float(patient_info.get('weight'))
+                 height = float(patient_info.get('height')) / 100  # Convert cm to m
+                 bmi = weight / (height * height)
+
+                 if bmi < 18.5:
+                     bmi_status = "Underweight"
+                 elif 18.5 <= bmi < 25:
+                     bmi_status = "Normal weight"
+                 elif 25 <= bmi < 30:
+                     bmi_status = "Overweight"
+                 else:
+                     bmi_status = "Obese"
+
+                 bmi_calculation = f"BMI: {bmi:.1f} kg/m² ({bmi_status})"
+             except:
+                 bmi_calculation = "BMI: Unable to calculate"
+
+         # Prepare enhanced prompt with nutrition requirements
          prompt = f"""
+ As a comprehensive medical AI, provide a detailed professional medical assessment based on the following patient data.
+ Format your response with clear headings and professional medical language:

  PATIENT DEMOGRAPHICS:
  - Name: {patient_info.get('name', 'Not provided')}
+ - Age: {patient_info.get('age', 'Not provided')} years
  - Sex: {patient_info.get('sex', 'Not provided')}
  - Weight: {patient_info.get('weight', 'Not provided')} kg
  - Height: {patient_info.get('height', 'Not provided')} cm
+ - {bmi_calculation}

  CHIEF COMPLAINT:
  {patient_info.get('complaint', 'Not provided')}

  INVESTIGATIONS:
  {patient_info.get('investigation_analysis', 'Not provided')}

+ Please provide a comprehensive medical assessment with the following structure:
+
+ 1. **CLINICAL SUMMARY** - Concise overview of the case
+ 2. **DIFFERENTIAL DIAGNOSIS** - List possible conditions with rationale
+ 3. **RISK FACTORS ASSESSMENT** - Identify relevant risk factors
+ 4. **RECOMMENDED TREATMENT PLAN** - Detailed treatment approach
+ 5. **FOLLOW-UP RECOMMENDATIONS** - Specific follow-up plans
+ 6. **NUTRITIONAL MANAGEMENT PLAN** - Detailed nutrition recommendations based on:
+ - Patient's current condition
+ - Age and sex-specific requirements
+ - Weight management if needed
+ - Heart health considerations
+ - Specific dietary modifications for the condition
+ - Meal planning suggestions
+ - Hydration recommendations
+ 7. **PATIENT EDUCATION POINTS** - Key points for patient understanding
+ 8. **PROGNOSIS** - Expected outcomes and timeline

+ Please use professional medical terminology and format with clear headings.
+ Make the nutritional section comprehensive and specific to this patient's needs.
+ Remember this is for educational purposes and emphasize the need for professional medical consultation.
  """

          response = gemini_model.generate_content(prompt)
+
+         # Format the response with better styling
+         formatted_response = f"""# 🏥 COMPREHENSIVE MEDICAL ASSESSMENT
+
+ ## 👤 PATIENT INFORMATION
+ **Name:** {patient_info.get('name', 'Not provided')}
+ **Age:** {patient_info.get('age', 'Not provided')} years
+ **Sex:** {patient_info.get('sex', 'Not provided')}
+ **Weight:** {patient_info.get('weight', 'Not provided')} kg
+ **Height:** {patient_info.get('height', 'Not provided')} cm
+ **{bmi_calculation}**
+
+ ---
+
+ {response.text}
+
+ ---
+
+ ## ⚠️ IMPORTANT DISCLAIMERS
+ - **Educational Purpose:** This assessment is for educational purposes only
+ - **Professional Consultation:** Always consult qualified healthcare professionals
+ - **Emergency:** Seek immediate medical attention for urgent symptoms
+ - **AI Limitations:** AI analysis supplements but does not replace clinical judgment
+
+ ---
+
+ **Generated on:** {datetime.now().strftime('%Y-%m-%d at %H:%M:%S')}"""
+
+         return formatted_response

      except Exception as e:
+         return f"# ❌ Error Generating Assessment\n\n**Error Details:** {str(e)}\n\nPlease check your Gemini API configuration and try again."

 
472
  def save_patient_data(name, age, sex, weight, height, complaint, medical_history,
473
  examination, heartbeat_results, investigation_analysis):
 
525
 
526
  # Create Gradio interface
527
  def create_interface():
528
+ with gr.Blocks(
529
+ title="Comprehensive Medical Consultation System",
530
+ theme=gr.themes.Soft(),
531
+ css="""
532
+ .medical-assessment textarea {
533
+ font-size: 16px !important;
534
+ line-height: 1.6 !important;
535
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif !important;
536
+ }
537
+ .gradio-container {
538
+ font-size: 14px;
539
+ }
540
+ .gr-textbox textarea {
541
+ font-size: 14px !important;
542
+ }
543
+ h1, h2, h3 {
544
+ color: #2c3e50 !important;
545
+ }
546
+ .medical-assessment .gr-textbox {
547
+ background-color: #f8f9fa !important;
548
+ }
549
+ """
550
+ ) as demo:
551
 
552
  gr.Markdown("""
553
  # πŸ₯ Comprehensive Medical Consultation System
 

          assessment_output = gr.Textbox(
              label="AI-Generated Medical Assessment",
+             lines=20,  # Increased from 15 to 20 for more space
+             placeholder="Complete medical assessment will be generated here based on all provided information...",
+             elem_classes=["medical-assessment"]  # Add CSS class for styling
          )

          # Hidden outputs to collect all data

  # Launch the application
  if __name__ == "__main__":
+     print("\n🏥 MEDICAL CONSULTATION SYSTEM")
+     print("=" * 50)
+
+     # Check system status
+     print("📋 System Status Check:")
+     print(f"✅ Gradio: Available")
+     print(f"{'✅' if LIBROSA_AVAILABLE else '⚠️'} Librosa: {'Available' if LIBROSA_AVAILABLE else 'Not installed'}")
+     print(f"{'✅' if TF_AVAILABLE else '⚠️'} TensorFlow: {'Available' if TF_AVAILABLE else 'Not installed'}")
+     print(f"{'✅' if GEMINI_AVAILABLE else '⚠️'} Gemini AI: {'Available' if GEMINI_AVAILABLE else 'Not installed'}")

+     # Check Gemini API key
+     if GEMINI_AVAILABLE:
+         if os.getenv("GOOGLE_API_KEY"):
+             print("🔑 Gemini API Key: Configured")
+         else:
+             print("⚠️ Gemini API Key: Not set (AI features limited)")
+             print(" Set with: export GOOGLE_API_KEY='your_api_key_here'")
+
+     print("\n🚀 Starting application...")
+     print("🌐 The app will be available at: http://localhost:7860")
+     print("=" * 50)
+
+     try:
+         demo = create_interface()
+         demo.launch(
+             server_name="0.0.0.0",
+             server_port=7860,
+             share=True,
+             debug=False,  # Set to False to reduce console output
+             show_error=True
+         )
+     except Exception as e:
+         print(f"❌ Error starting application: {e}")
+         print("Please check the error message above and ensure all dependencies are installed correctly.")