import time
from functools import wraps
from collections import defaultdict
import numpy as np


class PerformanceMonitor:
    def __init__(self):
        self.response_times = defaultdict(list)
        self.success_counts = defaultdict(int)
        self.total_counts = defaultdict(int)
        self.problem_types = defaultdict(int)
        self.total_problems = 0

    def record_response_time(self, model_type: str, time: float):
        """Record response time for a model"""
        self.response_times[model_type].append(time)

    def record_success(self, model_type: str, success: bool):
        """Record success/failure for a model"""
        self.total_counts[model_type] += 1
        if success:
            self.success_counts[model_type] += 1

    def record_problem_type(self, problem_type: str):
        """Record problem type"""
        self.problem_types[problem_type] += 1
        self.total_problems += 1

    def get_statistics(self) -> dict:
        """Get current performance statistics"""
        stats = {}

        # Calculate average response times
        for model_type, times in self.response_times.items():
            if times:
                stats[f'{model_type}_avg_response_time'] = np.mean(times)

        # Calculate success rates
        for model_type in self.total_counts.keys():
            total = self.total_counts[model_type]
            if total > 0:
                success_rate = (self.success_counts[model_type] / total) * 100
                stats[f'{model_type}_success_rate'] = success_rate

        # Calculate problem type distribution
        if self.total_problems > 0:
            distribution = {
                ptype: (count / self.total_problems) * 100
                for ptype, count in self.problem_types.items()
            }
            stats['problem_type_distribution'] = distribution

        return stats
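
# Example shape of the dictionary returned by get_statistics(). The "solver"
# model type, "algebra" problem type, and the numbers are hypothetical, shown
# only to illustrate the key naming scheme:
# {
#     'solver_avg_response_time': 1.42,                 # seconds (mean of recorded times)
#     'solver_success_rate': 85.0,                      # percent
#     'problem_type_distribution': {'algebra': 100.0},  # percent of all problems
# }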


def measure_time(func):
    """Decorator to measure function execution time.

    The wrapped call returns a (result, elapsed_seconds) tuple instead of
    the bare result.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        # Return both the original result and the elapsed wall-clock time.
        return result, end_time - start_time
    return wrapper
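

# Minimal usage sketch: the "solver" model type, the "arithmetic" problem type,
# and the simulated work in solve() are hypothetical placeholders.
if __name__ == "__main__":
    monitor = PerformanceMonitor()

    @measure_time
    def solve(problem: str) -> bool:
        # Stand-in for real work.
        time.sleep(0.01)
        return True

    # measure_time makes the call return (original result, elapsed seconds).
    success, elapsed = solve("2 + 2")
    monitor.record_response_time("solver", elapsed)
    monitor.record_success("solver", success)
    monitor.record_problem_type("arithmetic")

    print(monitor.get_statistics())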