File size: 14,343 Bytes
174e0f0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
"""
APR chart implementations.
"""
import logging
import warnings
from datetime import datetime
from typing import Dict, Any, Optional, Tuple

import pandas as pd
import plotly.graph_objects as go

from ..config.constants import DATE_RANGES, Y_AXIS_RANGES, FILE_PATHS
from .base_chart import BaseChart

logger = logging.getLogger(__name__)


class APRChart(BaseChart):
    """Time-series chart of agent APR values.

    Plots per-agent daily-median APR data points together with 7-day
    moving-average lines for APR and (when available) ETH-adjusted APR.
    """

    def create_chart(self, df: pd.DataFrame, **kwargs) -> go.Figure:
        """Create the APR time-series chart.

        Args:
            df: Raw metrics frame. Only rows where ``metric_type == 'APR'``
                are used; relevant columns are ``timestamp``, ``apr``,
                ``agent_name`` and, optionally, ``adjusted_apr``.
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            A plotly figure, or a placeholder "empty" figure when no
            usable data remains after any processing stage.
        """
        if df.empty:
            return self._create_empty_chart("No APR data available")

        # Keep only APR rows; other metric types are charted elsewhere.
        apr_data = df[df['metric_type'] == 'APR'].copy()
        if apr_data.empty:
            return self._create_empty_chart("No APR data available")

        # Daily median aggregation dampens intra-day outliers.
        apr_data = self.data_processor.aggregate_daily_medians(apr_data, ['apr', 'adjusted_apr'])
        if apr_data.empty:
            return self._create_empty_chart("No APR data available after aggregation")

        # Drop implausibly high APR values (400% threshold) with forward filling.
        apr_data, _ = self.data_processor.filter_high_apr_values(apr_data)
        if apr_data.empty:
            return self._create_empty_chart("No APR data available after high APR filtering")

        # Persist the fully processed frame so the chart input can be verified offline.
        processed_csv_path = self.data_processor.save_to_csv(apr_data, FILE_PATHS['apr_processed_csv'])
        if processed_csv_path:
            # Lazy %-style args avoid string formatting when INFO is disabled.
            logger.info("Saved processed APR data to %s for verification", processed_csv_path)
            logger.info(
                "Processed APR data contains %d rows after agent exclusion, zero filtering, "
                "daily median aggregation, and high APR filtering",
                len(apr_data),
            )

        # Outlier filtering is currently disabled but kept for compatibility.
        apr_data = self._filter_outliers(apr_data, 'apr')

        # Observed time range; the x-axis start comes from configuration.
        min_time = apr_data['timestamp'].min()
        max_time = apr_data['timestamp'].max()
        x_start_date = DATE_RANGES['apr_start']

        fig = self._create_base_figure()

        # Background shading plus a zero reference line.
        y_range = Y_AXIS_RANGES['apr']
        self._add_background_shapes(fig, min_time, max_time, y_range['min'], y_range['max'])
        self._add_zero_line(fig, min_time, max_time)

        # 7-day moving average of APR.
        avg_apr_data = self._calculate_moving_average(apr_data, 'apr')

        # Per-agent scatter points with a stable color per agent.
        unique_agents = apr_data['agent_name'].unique()
        color_map = self._get_color_map(unique_agents)
        self._add_agent_data_points(fig, apr_data, 'apr', color_map)

        self._add_moving_average_line(
            fig, avg_apr_data, 'apr',
            'Average APR (7d window)',
            self.colors['apr'],
            width=3
        )

        # The adjusted-APR average is only drawn when the column carries data.
        if 'adjusted_apr' in apr_data.columns and apr_data['adjusted_apr'].notna().any():
            adjusted_avg_data = self._calculate_moving_average(apr_data, 'adjusted_apr')

            # Forward-fill gaps in the averaged series; pandas may emit a
            # FutureWarning here, which is deliberately silenced.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", FutureWarning)
                adjusted_avg_data['moving_avg'] = adjusted_avg_data['moving_avg'].ffill()

            self._add_moving_average_line(
                fig, adjusted_avg_data, 'adjusted_apr',
                'Average ETH Adjusted APR (7d window)',
                self.colors['adjusted_apr'],
                width=3
            )

        # No single y-axis title: region-specific labels are added below.
        self._update_layout(
            fig,
            title="Modius Agents",
            y_axis_title=None,
            y_range=[y_range['min'], y_range['max']]
        )

        # Clamp the x-axis end to the last date with a valid moving average.
        last_valid_date = avg_apr_data['timestamp'].max() if not avg_apr_data.empty else max_time

        self._update_axes(
            fig,
            x_range=[x_start_date, last_valid_date],
            y_range=[y_range['min'], y_range['max']]
        )

        self._add_region_annotations(fig, y_range)

        self._save_chart(
            fig,
            FILE_PATHS['apr_graph_html'],
            FILE_PATHS['apr_graph_png']
        )

        return fig

    def _add_region_annotations(self, fig: go.Figure, y_range: Dict[str, float]) -> None:
        """Add rotated y-axis labels for the negative and positive regions.

        Args:
            fig: Figure to annotate in place.
            y_range: Currently unused; kept for signature stability.
        """
        # Shared font styling for both region labels.
        label_font = dict(
            size=16,
            family=self.config['font_family'],
            color="black",
            weight="bold"
        )

        # Label for the drawdown (negative) region.
        fig.add_annotation(
            x=-0.08,
            y=-25,
            xref="paper",
            yref="y",
            text="Percent drawdown [%]",
            showarrow=False,
            font=label_font,
            textangle=-90,
            align="center"
        )

        # Label for the APR (positive) region.
        fig.add_annotation(
            x=-0.08,
            y=50,
            xref="paper",
            yref="y",
            text="Agent APR [%]",
            showarrow=False,
            font=label_font,
            textangle=-90,
            align="center"
        )


class APRHashChart(BaseChart):
    """Bar chart of median APR grouped by agent-hash-derived version."""

    def create_chart(self, df: pd.DataFrame, **kwargs) -> go.Figure:
        """Create the APR-vs-agent-hash bar chart.

        Args:
            df: Pre-filtered/processed metrics frame; rows without an
                ``agent_hash`` are dropped. Uses the ``apr`` column.
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            A plotly figure, or a placeholder "empty" figure when no
            usable rows remain.
        """
        if df.empty:
            return self._create_empty_chart("No agent hash data available")

        # Data is already filtered and processed by the calling function;
        # only rows that carry an agent hash can be grouped.
        apr_data = df[df['agent_hash'].notna()].copy()
        if apr_data.empty:
            return self._create_empty_chart("No valid APR data with agent_hash found")

        # Lazy %-style args avoid string formatting when INFO is disabled.
        logger.info("APR Hash Chart: Using %d processed data points", len(apr_data))

        fig = self._create_base_figure()

        # Map each hash to a human-readable version label.
        unique_hashes = apr_data['agent_hash'].unique()
        version_map = self._create_version_map(unique_hashes)

        # Known-version hashes sort first (tby -> "1", vq -> "2"); others by hash.
        sorted_hashes = sorted(unique_hashes, key=lambda h: "1" if h.endswith("tby") else "2" if h.endswith("vq") else h)

        # Zero reference line across the categorical x range.
        self._add_zero_line(fig, -0.5, len(version_map) - 0.5)

        # Fixed colors for known versions; everything else gets the default.
        version_colors = {
            "v0.4.1": "rgba(31, 119, 180, 0.7)",
            "v0.4.2": "rgba(44, 160, 44, 0.7)",
        }
        default_color = "rgba(214, 39, 40, 0.7)"

        # Accumulate per-version APR samples across all hashes of that version.
        version_data = {}
        version_stats = {}

        for agent_hash in sorted_hashes:
            hash_data = apr_data[apr_data['agent_hash'] == agent_hash]
            version = version_map[agent_hash]

            apr_values = hash_data['apr'].tolist()

            if version not in version_stats:
                version_stats[version] = {'apr_values': [], 'count': 0, 'hashes': []}

            version_stats[version]['apr_values'].extend(apr_values)
            version_stats[version]['count'] += len(apr_values)
            version_stats[version]['hashes'].append(agent_hash)

        # Build the bar-chart series (one bar per version).
        versions = list(version_stats.keys())
        medians = []
        colors = []
        hover_texts = []

        for version in versions:
            # Build the Series once and reuse it for every statistic.
            values = pd.Series(version_stats[version]['apr_values'])
            median_apr = values.median()
            medians.append(median_apr)

            colors.append(version_colors.get(version, default_color))

            # Hover text summarises the distribution behind the bar.
            count = version_stats[version]['count']
            hover_text = (
                f"Version: {version}<br>"
                f"Median APR: {median_apr:.2f}%<br>"
                f"Mean APR: {values.mean():.2f}%<br>"
                f"Min APR: {values.min():.2f}%<br>"
                f"Max APR: {values.max():.2f}%<br>"
                f"Data points: {count}"
            )
            hover_texts.append(hover_text)

        fig.add_trace(
            go.Bar(
                x=versions,
                y=medians,
                marker=dict(
                    color=colors,
                    line=dict(width=1, color='black')
                ),
                hoverinfo='text',
                hovertext=hover_texts,
                showlegend=False,
                name="Median APR"
            )
        )

        # Median labels sit 5% of the tallest bar above each bar; the offset
        # is loop-invariant, so compute it once.
        if medians:
            label_offset = max(medians) * 0.05
            for version, median_apr in zip(versions, medians):
                fig.add_annotation(
                    x=version,
                    y=median_apr + label_offset,
                    text=f"{median_apr:.1f}%",
                    showarrow=False,
                    font=dict(
                        family=self.config['font_family'],
                        size=14,
                        color="black",
                        weight="bold"
                    )
                )

        self._add_version_comparison(fig, version_stats, len(versions))

        self._update_layout(
            fig,
            title="Performance Graph",
            height=900
        )

        self._update_axes(fig, y_auto=True)

        # Angled tick labels keep version names readable.
        fig.update_xaxes(
            tickangle=-45
        )

        self._save_chart(
            fig,
            FILE_PATHS['apr_hash_graph_html'],
            FILE_PATHS['apr_hash_graph_png']
        )

        return fig

    def _create_version_map(self, hashes: list) -> Dict[str, str]:
        """Map agent hashes to version labels.

        Hashes ending in ``tby``/``vq`` map to known releases; anything
        else is labelled with its last six characters.
        """
        version_map = {}
        for hash_val in hashes:
            if hash_val.endswith("tby"):
                version_map[hash_val] = "v0.4.1"
            elif hash_val.endswith("vq"):
                version_map[hash_val] = "v0.4.2"
            else:
                version_map[hash_val] = f"Hash: {hash_val[-6:]}"
        return version_map

    def _add_version_comparison(self, fig: go.Figure, version_stats: Dict, num_hashes: int) -> None:
        """Annotate the chart with the v0.4.1 -> v0.4.2 median-APR delta.

        Only drawn when both known versions are present in the stats.

        Args:
            fig: Figure to annotate in place.
            version_stats: Per-version dict with an ``apr_values`` list.
            num_hashes: Number of bars; the annotation is centered on them.
        """
        if "v0.4.1" in version_stats and "v0.4.2" in version_stats:
            v041_median = pd.Series(version_stats["v0.4.1"]["apr_values"]).median()
            v042_median = pd.Series(version_stats["v0.4.2"]["apr_values"]).median()

            improvement = v042_median - v041_median
            change_text = "improvement" if improvement > 0 else "decrease"

            fig.add_annotation(
                x=(num_hashes - 1) / 2,
                y=90,
                text=f"<b>Version Comparison:</b> {abs(improvement):.2f}% {change_text} from v0.4.1 to v0.4.2",
                showarrow=False,
                font=dict(
                    family=self.config['font_family'],
                    size=16,
                    color="black",
                    weight="bold"
                ),
                bgcolor="rgba(255, 255, 255, 0.9)",
                bordercolor="black",
                borderwidth=2,
                borderpad=6,
                opacity=0.9
            )


def generate_apr_visualizations(data_processor=None) -> Tuple[go.Figure, Optional[str]]:
    """Build the APR time-series chart and export its CSV.

    Args:
        data_processor: Optional ``DataProcessor``; a fresh instance is
            created when omitted.

    Returns:
        Tuple of (figure, path to the exported CSV or ``None``).
    """
    from ..data.data_processor import DataProcessor

    processor = DataProcessor() if data_processor is None else data_processor

    # Pull APR rows from the database; the second tuple element is unused here.
    apr_df, _ = processor.fetch_apr_data_from_db()

    chart = APRChart(processor)
    return chart.generate_visualization(apr_df, csv_filename=FILE_PATHS['apr_csv'])


def generate_apr_vs_agent_hash_visualizations(df: pd.DataFrame) -> Tuple[go.Figure, Optional[str]]:
    """Build the APR-vs-agent-hash bar chart and export its CSV.

    Args:
        df: Raw metrics frame; the same filtering pipeline as the APR
            time-series chart is applied here for consistency.

    Returns:
        Tuple of (figure, path to the exported CSV or ``None``).
    """
    from ..data.data_processor import DataProcessor

    processor = DataProcessor()

    # Mirror the time-series pipeline so both charts see identical data:
    # APR rows only, then daily medians, then high-APR filtering.
    filtered = df[df['metric_type'] == 'APR'].copy()
    if not filtered.empty:
        filtered = processor.aggregate_daily_medians(filtered, ['apr', 'adjusted_apr'])
        filtered, _ = processor.filter_high_apr_values(filtered)

    chart = APRHashChart(processor)
    return chart.generate_visualization(filtered, csv_filename=FILE_PATHS['apr_hash_csv'])