xingqiang committed · Commit 3599276 · Parent: 9e65baa

Enhanced technical report generation with comprehensive analysis features

Files changed (2):
  1. app.py +189 -45
  2. requirements.txt +2 -1
app.py CHANGED

@@ -6,6 +6,8 @@ import os
 from pathlib import Path
 from datetime import datetime
 import tempfile
+import time
+import psutil
 
 from model import RadarDetectionModel
 from feature_extraction import (calculate_amplitude, classify_amplitude,
@@ -19,34 +21,119 @@ from database import save_report, get_report_history
 
 class TechnicalReportGenerator:
     def __init__(self):
-        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        self.timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
-    def _generate_technical_section(self, detection_result, features):
-        """Generate technical analysis section of the report."""
-        tech_doc = "## Technical Analysis\n\n"
-
-        # Detection Results
-        tech_doc += "### Detection Results\n\n```python\n"
-        tech_doc += f"Confidence Scores: {detection_result['scores'].tolist()}\n"
-        tech_doc += f"Bounding Boxes: {detection_result['boxes'].tolist()}\n"
-        tech_doc += f"Labels: {detection_result['labels'].tolist()}\n"
-        tech_doc += "```\n\n"
-
-        # Feature Analysis
-        tech_doc += "### Feature Analysis\n\n"
-        for feature_name, value in features.items():
-            tech_doc += f"- **{feature_name}**: {value}\n"
-
-        # Signal Processing Details
-        tech_doc += "\n### Signal Processing Metrics\n\n"
-        tech_doc += "| Metric | Value | Classification |\n"
-        tech_doc += "|--------|--------|----------------|\n"
-        tech_doc += f"|Amplitude|{features['Amplitude']}|{classify_amplitude(features['Amplitude'])}|\n"
-        tech_doc += f"|Distribution Range|{features['Distribution Range']}|{features['Distribution Range']}|\n"
-        tech_doc += f"|Attenuation Rate|{features['Attenuation Rate']}|{features['Attenuation Rate']}|\n"
-        tech_doc += f"|Reflection Count|{features['Reflection Count']}|{features['Reflection Count']}|\n"
-
-        return tech_doc
+    def generate_model_analysis(self, model_outputs):
+        """Generate model-specific analysis section"""
+        model_section = "## Model Analysis\n\n"
+
+        # Image encoder analysis
+        model_section += "### Image Encoder (SigLIP-So400m) Analysis\n"
+        model_section += "- Feature extraction quality: {:.2f}%\n".format(model_outputs.get('feature_quality', 0) * 100)
+        model_section += "- Image encoding latency: {:.2f}ms\n".format(model_outputs.get('encoding_latency', 0))
+        model_section += "- Feature map dimensions: {}\n\n".format(model_outputs.get('feature_dimensions', 'N/A'))
+
+        # Text decoder analysis
+        model_section += "### Text Decoder (Gemma-2B) Analysis\n"
+        model_section += "- Text generation confidence: {:.2f}%\n".format(model_outputs.get('text_confidence', 0) * 100)
+        model_section += "- Decoding latency: {:.2f}ms\n".format(model_outputs.get('decoding_latency', 0))
+        model_section += "- Token processing rate: {:.2f} tokens/sec\n\n".format(model_outputs.get('token_rate', 0))
+
+        return model_section
+
+    def generate_detection_analysis(self, detection_results):
+        """Generate detailed detection analysis section"""
+        detection_section = "## Detection Analysis\n\n"
+
+        # Detection metrics
+        detection_section += "### Object Detection Metrics\n"
+        detection_section += "| Metric | Value |\n"
+        detection_section += "|--------|-------|\n"
+        detection_section += "| Detection Count | {} |\n".format(len(detection_results.get('boxes', [])))
+        detection_section += "| Average Confidence | {:.2f}% |\n".format(
+            np.mean(detection_results.get('scores', [0])) * 100
+        )
+        detection_section += "| Processing Time | {:.2f}ms |\n\n".format(
+            detection_results.get('processing_time', 0)
+        )
+
+        # Detailed detection results
+        detection_section += "### Detection Details\n"
+        detection_section += "| Object | Confidence | Bounding Box |\n"
+        detection_section += "|--------|------------|---------------|\n"
+
+        boxes = detection_results.get('boxes', [])
+        scores = detection_results.get('scores', [])
+        labels = detection_results.get('labels', [])
+
+        for box, score, label in zip(boxes, scores, labels):
+            detection_section += "| {} | {:.2f}% | {} |\n".format(
+                label,
+                score * 100,
+                [round(coord, 2) for coord in box]
+            )
+
+        return detection_section
+
+    def generate_multimodal_analysis(self, mm_results):
+        """Generate multimodal analysis section"""
+        mm_section = "## Multimodal Analysis\n\n"
+
+        # Feature correlation analysis
+        mm_section += "### Feature Correlation Analysis\n"
+        mm_section += "- Text-Image Alignment Score: {:.2f}%\n".format(
+            mm_results.get('alignment_score', 0) * 100
+        )
+        mm_section += "- Cross-Modal Coherence: {:.2f}%\n".format(
+            mm_results.get('coherence_score', 0) * 100
+        )
+        mm_section += "- Feature Space Correlation: {:.2f}\n\n".format(
+            mm_results.get('feature_correlation', 0)
+        )
+
+        return mm_section
+
+    def generate_performance_metrics(self, perf_data):
+        """Generate performance metrics section"""
+        perf_section = "## Performance Metrics\n\n"
+
+        # System metrics
+        perf_section += "### System Performance\n"
+        perf_section += "- Total Processing Time: {:.2f}ms\n".format(perf_data.get('total_time', 0))
+        perf_section += "- Peak Memory Usage: {:.2f}MB\n".format(perf_data.get('peak_memory', 0))
+        perf_section += "- GPU Utilization: {:.2f}%\n\n".format(perf_data.get('gpu_util', 0))
+
+        # Pipeline metrics
+        perf_section += "### Pipeline Statistics\n"
+        perf_section += "| Stage | Time (ms) | Memory (MB) |\n"
+        perf_section += "|-------|------------|-------------|\n"
+        pipeline_stages = perf_data.get('pipeline_stats', {})
+        for stage, stats in pipeline_stages.items():
+            perf_section += "| {} | {:.2f} | {:.2f} |\n".format(
+                stage,
+                stats.get('time', 0),
+                stats.get('memory', 0)
+            )
+
+        return perf_section
+
+    def generate_report(self, results):
+        """Generate comprehensive technical report"""
+        report = f"# Technical Analysis Report\nGenerated at: {self.timestamp}\n\n"
+
+        # Add model analysis
+        report += self.generate_model_analysis(results.get('model_outputs', {}))
+
+        # Add detection analysis
+        report += self.generate_detection_analysis(results.get('detection_results', {}))
+
+        # Add multimodal analysis
+        report += self.generate_multimodal_analysis(results.get('multimodal_results', {}))
+
+        # Add performance metrics
+        report += self.generate_performance_metrics(results.get('performance_data', {}))
+
+        return report
 
 # Initialize model with HF token from environment
 model = None
@@ -108,28 +195,66 @@ def process_image(image, generate_tech_report=False):
             "Reflection Count": reflection_class
         }
 
-        # Generate report and visualizations
-        report = generate_report(detection_result, image, features)
-        detection_image = plot_detection(image, detection_result)
+        # Start performance tracking
+        start_time = time.time()
+        performance_data = {
+            'pipeline_stats': {},
+            'peak_memory': 0,
+            'gpu_util': 0
+        }
+
+        # Process image and get results
+        stage_start = time.time()
+        detection_results = run_detection(image)
+        performance_data['pipeline_stats']['detection'] = {
+            'time': (time.time() - stage_start) * 1000,
+            'memory': get_memory_usage()
+        }
+
+        # Extract features and analyze
+        stage_start = time.time()
+        model_outputs = extract_features(image)
+        performance_data['pipeline_stats']['feature_extraction'] = {
+            'time': (time.time() - stage_start) * 1000,
+            'memory': get_memory_usage()
+        }
+
+        # Perform multimodal analysis
+        stage_start = time.time()
+        multimodal_results = analyze_multimodal(model_outputs)
+        performance_data['pipeline_stats']['multimodal_analysis'] = {
+            'time': (time.time() - stage_start) * 1000,
+            'memory': get_memory_usage()
+        }
+
+        # Update performance data
+        performance_data['total_time'] = (time.time() - start_time) * 1000
+        performance_data['peak_memory'] = get_peak_memory_usage()
+        performance_data['gpu_util'] = get_gpu_utilization()
+
+        # Generate reports
+        analysis_report = generate_analysis_report(detection_results)
 
-        # Generate technical report if requested
-        tech_report = None
         if generate_tech_report:
-            report_gen = TechnicalReportGenerator()
-            tech_report = report_gen._generate_technical_section(detection_result, features)
+            # Prepare results for technical report
+            tech_report_data = {
+                'model_outputs': model_outputs,
+                'detection_results': detection_results,
+                'multimodal_results': multimodal_results,
+                'performance_data': performance_data
+            }
 
-            # Save technical report to a temporary file
-            with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.md') as f:
+            # Generate technical report
+            tech_report = TechnicalReportGenerator().generate_report(tech_report_data)
+
+            # Save technical report to temporary file
+            report_path = "temp_tech_report.md"
+            with open(report_path, "w") as f:
                 f.write(tech_report)
-                tech_report = f.name
-
-        # Save report if database is configured
-        try:
-            save_report(report)
-        except Exception as e:
-            print(f"Warning: Could not save report: {str(e)}")
-
-        return detection_image, render_report(report), tech_report
+
+            return detection_results['image'], analysis_report, report_path
+
+        return detection_results['image'], analysis_report, None
 
     except Exception as e:
         error_msg = f"Error processing image: {str(e)}"
@@ -154,6 +279,25 @@ def display_history():
     except Exception as e:
        return f"Error retrieving history: {str(e)}"
 
+def get_memory_usage():
+    """Get current memory usage in MB"""
+    process = psutil.Process()
+    return process.memory_info().rss / 1024 / 1024
+
+def get_peak_memory_usage():
+    """Get peak memory usage in MB"""
+    process = psutil.Process()
+    return process.memory_info().peak_wset / 1024 / 1024 if hasattr(process.memory_info(), 'peak_wset') else 0
+
+def get_gpu_utilization():
+    """Get GPU utilization percentage"""
+    try:
+        if torch.cuda.is_available():
+            return torch.cuda.utilization() if hasattr(torch.cuda, 'utilization') else 0
+    except:
+        pass
+    return 0
+
 # Create Gradio interface
 css = """
 .gradio-container {max-width: 1200px !important}
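
For orientation, a minimal usage sketch of the new TechnicalReportGenerator follows. It assumes the app.py context above, where TechnicalReportGenerator and np (numpy) are already in scope; every metric value is a placeholder for illustration, not real model output.

# Assemble a results dict with the four keys generate_report() expects (placeholder values only).
sample_results = {
    'model_outputs': {'feature_quality': 0.93, 'encoding_latency': 41.2,
                      'feature_dimensions': '(1, 256, 1152)', 'text_confidence': 0.88,
                      'decoding_latency': 120.5, 'token_rate': 34.7},
    'detection_results': {'boxes': [[12.0, 30.5, 88.2, 140.0]], 'scores': [0.91],
                          'labels': ['target'], 'processing_time': 180.3},
    'multimodal_results': {'alignment_score': 0.84, 'coherence_score': 0.79,
                           'feature_correlation': 0.72},
    'performance_data': {'total_time': 450.1, 'peak_memory': 512.4, 'gpu_util': 63.0,
                         'pipeline_stats': {'detection': {'time': 180.3, 'memory': 480.2}}},
}

# Missing keys fall back through .get(), so partial dicts still render without errors.
report_md = TechnicalReportGenerator().generate_report(sample_results)
print(report_md)  # Markdown string with Model / Detection / Multimodal / Performance sections
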
requirements.txt CHANGED

@@ -11,4 +11,5 @@ scikit-learn>=1.3.0
 jinja2>=3.1.2
 huggingface-hub>=0.19.0
 python-dotenv>=1.0.0
-markdown>=3.4.0
+markdown>=3.4.0
+psutil>=5.9.0
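
One portability aside on the new psutil helper (not part of this commit): psutil exposes peak_wset only on Windows, so get_peak_memory_usage() reports 0 on the Linux hosts Spaces typically run on. A possible cross-platform sketch, using the Unix-only stdlib resource module as a fallback:

import sys
import psutil

def get_peak_memory_usage_mb():
    """Peak resident memory in MB; cross-platform sketch, not part of this commit."""
    info = psutil.Process().memory_info()
    if hasattr(info, 'peak_wset'):        # Windows exposes the peak working set directly
        return info.peak_wset / 1024 / 1024
    import resource                       # Unix-only stdlib fallback
    peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # ru_maxrss is reported in kilobytes on Linux and in bytes on macOS
    return peak / 1024 if sys.platform.startswith('linux') else peak / 1024 / 1024
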