import requests import pandas as pd import gradio as gr import plotly.graph_objects as go import plotly.express as px from plotly.subplots import make_subplots from datetime import datetime, timedelta import json # Commenting out blockchain-related imports that cause loading issues # from web3 import Web3 import os import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates import random import logging from typing import List, Dict, Any # Comment out the import for now and replace with dummy functions # from app_trans_new import create_transcation_visualizations,create_active_agents_visualizations # APR visualization functions integrated directly # Set up logging with appropriate verbosity logging.basicConfig( level=logging.INFO, # Use INFO level instead of DEBUG to reduce verbosity format="%(asctime)s - %(levelname)s - %(message)s", handlers=[ logging.FileHandler("app_debug.log"), # Log to file for persistence logging.StreamHandler() # Also log to console ] ) logger = logging.getLogger(__name__) # Reduce third-party library logging logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("httpx").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) # Log the startup information logger.info("============= APPLICATION STARTING =============") logger.info(f"Running from directory: {os.getcwd()}") # Global variable to store the data for reuse global_df = None # Configuration API_BASE_URL = "https://afmdb.autonolas.tech" logger.info(f"Using API endpoint: {API_BASE_URL}") def get_agent_type_by_name(type_name: str) -> Dict[str, Any]: """Get agent type by name""" url = f"{API_BASE_URL}/api/agent-types/name/{type_name}" logger.debug(f"Calling API: {url}") try: response = requests.get(url) logger.debug(f"Response status: {response.status_code}") if response.status_code == 404: logger.error(f"Agent type '{type_name}' not found") return None response.raise_for_status() result = response.json() logger.debug(f"Agent type response: {result}") return result except Exception as e: logger.error(f"Error in get_agent_type_by_name: {e}") return None def get_attribute_definition_by_name(attr_name: str) -> Dict[str, Any]: """Get attribute definition by name""" url = f"{API_BASE_URL}/api/attributes/name/{attr_name}" logger.debug(f"Calling API: {url}") try: response = requests.get(url) logger.debug(f"Response status: {response.status_code}") if response.status_code == 404: logger.error(f"Attribute definition '{attr_name}' not found") return None response.raise_for_status() result = response.json() logger.debug(f"Attribute definition response: {result}") return result except Exception as e: logger.error(f"Error in get_attribute_definition_by_name: {e}") return None def get_agents_by_type(type_id: int) -> List[Dict[str, Any]]: """Get all agents of a specific type""" url = f"{API_BASE_URL}/api/agent-types/{type_id}/agents/" logger.debug(f"Calling API: {url}") try: response = requests.get(url) logger.debug(f"Response status: {response.status_code}") if response.status_code == 404: logger.error(f"No agents found for type ID {type_id}") return [] response.raise_for_status() result = response.json() logger.debug(f"Agents count: {len(result)}") logger.debug(f"First few agents: {result[:2] if result else []}") return result except Exception as e: logger.error(f"Error in get_agents_by_type: {e}") return [] def get_attribute_values_by_type_and_attr(agents: List[Dict[str, Any]], attr_def_id: int) -> List[Dict[str, Any]]: """Get all attribute values for a specific attribute definition across all agents of a given list""" all_attributes = [] logger.debug(f"Getting attributes for {len(agents)} agents with attr_def_id: {attr_def_id}") # For each agent, get their attributes and filter for the one we want for agent in agents: agent_id = agent["agent_id"] # Call the /api/agents/{agent_id}/attributes/ endpoint url = f"{API_BASE_URL}/api/agents/{agent_id}/attributes/" logger.debug(f"Calling API for agent {agent_id}: {url}") try: response = requests.get(url, params={"limit": 1000}) if response.status_code == 404: logger.error(f"No attributes found for agent ID {agent_id}") continue response.raise_for_status() agent_attrs = response.json() logger.debug(f"Agent {agent_id} has {len(agent_attrs)} attributes") # Filter for the specific attribute definition ID filtered_attrs = [attr for attr in agent_attrs if attr.get("attr_def_id") == attr_def_id] logger.debug(f"Agent {agent_id} has {len(filtered_attrs)} APR attributes") if filtered_attrs: logger.debug(f"Sample attribute for agent {agent_id}: {filtered_attrs[0]}") all_attributes.extend(filtered_attrs) except requests.exceptions.RequestException as e: logger.error(f"Error fetching attributes for agent ID {agent_id}: {e}") logger.info(f"Total APR attributes found across all agents: {len(all_attributes)}") return all_attributes def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str: """Get agent name from agent ID""" for agent in agents: if agent["agent_id"] == agent_id: return agent["agent_name"] return "Unknown" def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]: """Extract APR value, adjusted APR value, and timestamp from JSON value""" try: agent_id = attr.get("agent_id", "unknown") logger.debug(f"Extracting APR value for agent {agent_id}") # The APR value is stored in the json_value field if attr["json_value"] is None: logger.debug(f"Agent {agent_id}: json_value is None") return {"apr": None, "adjusted_apr": None, "timestamp": None, "agent_id": agent_id, "is_dummy": False} # If json_value is a string, parse it if isinstance(attr["json_value"], str): logger.debug(f"Agent {agent_id}: json_value is string, parsing") json_data = json.loads(attr["json_value"]) else: json_data = attr["json_value"] apr = json_data.get("apr") adjusted_apr = json_data.get("adjusted_apr") # Extract adjusted_apr if present timestamp = json_data.get("timestamp") logger.debug(f"Agent {agent_id}: Raw APR value: {apr}, adjusted APR value: {adjusted_apr}, timestamp: {timestamp}") # Convert timestamp to datetime if it exists timestamp_dt = None if timestamp: timestamp_dt = datetime.fromtimestamp(timestamp) result = {"apr": apr, "adjusted_apr": adjusted_apr, "timestamp": timestamp_dt, "agent_id": agent_id, "is_dummy": False} logger.debug(f"Agent {agent_id}: Extracted result: {result}") return result except (json.JSONDecodeError, KeyError, TypeError) as e: logger.error(f"Error parsing JSON value: {e} for agent_id: {attr.get('agent_id')}") logger.error(f"Problematic json_value: {attr.get('json_value')}") return {"apr": None, "adjusted_apr": None, "timestamp": None, "agent_id": attr.get('agent_id'), "is_dummy": False} def fetch_apr_data_from_db(): """ Fetch APR data from database using the API. """ global global_df logger.info("==== Starting APR data fetch ====") try: # Step 1: Find the Modius agent type logger.info("Finding Modius agent type") modius_type = get_agent_type_by_name("Modius") if not modius_type: logger.error("Modius agent type not found, using placeholder data") global_df = pd.DataFrame([]) return global_df type_id = modius_type["type_id"] logger.info(f"Found Modius agent type with ID: {type_id}") # Step 2: Find the APR attribute definition logger.info("Finding APR attribute definition") apr_attr_def = get_attribute_definition_by_name("APR") if not apr_attr_def: logger.error("APR attribute definition not found, using placeholder data") global_df = pd.DataFrame([]) return global_df attr_def_id = apr_attr_def["attr_def_id"] logger.info(f"Found APR attribute definition with ID: {attr_def_id}") # Step 3: Get all agents of type Modius logger.info(f"Getting all agents of type Modius (type_id: {type_id})") modius_agents = get_agents_by_type(type_id) if not modius_agents: logger.error("No agents of type 'Modius' found") global_df = pd.DataFrame([]) return global_df logger.info(f"Found {len(modius_agents)} Modius agents") logger.debug(f"Modius agents: {[{'agent_id': a['agent_id'], 'agent_name': a['agent_name']} for a in modius_agents]}") # Step 4: Fetch all APR values for Modius agents logger.info(f"Fetching APR values for all Modius agents (attr_def_id: {attr_def_id})") apr_attributes = get_attribute_values_by_type_and_attr(modius_agents, attr_def_id) if not apr_attributes: logger.error("No APR values found for 'Modius' agents") global_df = pd.DataFrame([]) return global_df logger.info(f"Found {len(apr_attributes)} APR attributes total") # Step 5: Extract APR data logger.info("Extracting APR data from attributes") apr_data_list = [] for attr in apr_attributes: apr_data = extract_apr_value(attr) if apr_data["apr"] is not None and apr_data["timestamp"] is not None: # Get agent name agent_name = get_agent_name(attr["agent_id"], modius_agents) # Add agent name to the data apr_data["agent_name"] = agent_name # Add is_dummy flag (all real data) apr_data["is_dummy"] = False # Include all APR values (including negative ones) EXCEPT zero and -100 if apr_data["apr"] != 0 and apr_data["apr"] != -100: apr_data["metric_type"] = "APR" logger.debug(f"Agent {agent_name} ({attr['agent_id']}): APR value: {apr_data['apr']}") # Add to the data list apr_data_list.append(apr_data) else: # Log that we're skipping zero or -100 values logger.debug(f"Skipping value for agent {agent_name} ({attr['agent_id']}): {apr_data['apr']} (zero or -100)") # Convert list of dictionaries to DataFrame if not apr_data_list: logger.error("No valid APR data extracted") global_df = pd.DataFrame([]) return global_df global_df = pd.DataFrame(apr_data_list) # Log the resulting dataframe logger.info(f"Created DataFrame with {len(global_df)} rows") logger.info(f"DataFrame columns: {global_df.columns.tolist()}") logger.info(f"APR statistics: min={global_df['apr'].min()}, max={global_df['apr'].max()}, mean={global_df['apr'].mean()}") # Log adjusted APR statistics if available if 'adjusted_apr' in global_df.columns and global_df['adjusted_apr'].notna().any(): logger.info(f"Adjusted APR statistics: min={global_df['adjusted_apr'].min()}, max={global_df['adjusted_apr'].max()}, mean={global_df['adjusted_apr'].mean()}") logger.info(f"Number of records with adjusted_apr: {global_df['adjusted_apr'].notna().sum()} out of {len(global_df)}") # Log the difference between APR and adjusted APR valid_rows = global_df[global_df['adjusted_apr'].notna()] if not valid_rows.empty: avg_diff = (valid_rows['apr'] - valid_rows['adjusted_apr']).mean() max_diff = (valid_rows['apr'] - valid_rows['adjusted_apr']).max() min_diff = (valid_rows['apr'] - valid_rows['adjusted_apr']).min() logger.info(f"APR vs Adjusted APR difference: avg={avg_diff:.2f}, max={max_diff:.2f}, min={min_diff:.2f}") else: logger.info("No adjusted APR values found in the data") # All values are APR type (excluding zero and -100 values) logger.info("All values are APR type (excluding zero and -100 values)") logger.info(f"Agents count: {global_df['agent_name'].value_counts().to_dict()}") # Log the entire dataframe for debugging logger.debug("Final DataFrame contents:") for idx, row in global_df.iterrows(): logger.debug(f"Row {idx}: {row.to_dict()}") return global_df except requests.exceptions.RequestException as e: logger.error(f"API request error: {e}") global_df = pd.DataFrame([]) return global_df except Exception as e: logger.error(f"Error fetching APR data: {e}") logger.exception("Exception details:") global_df = pd.DataFrame([]) return global_df def generate_apr_visualizations(): """Generate APR visualizations with real data only (no dummy data)""" global global_df # Fetch data from database df = fetch_apr_data_from_db() # If we got no data at all, return placeholder figures if df.empty: logger.info("No APR data available. Using fallback visualization.") # Create empty visualizations with a message using Plotly fig = go.Figure() fig.add_annotation( x=0.5, y=0.5, text="No APR data available", font=dict(size=20), showarrow=False ) fig.update_layout( xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False) ) # Save as static file for reference fig.write_html("modius_apr_combined_graph.html") fig.write_image("modius_apr_combined_graph.png") csv_file = None return fig, csv_file # No longer generating dummy data # Set global_df for access by other functions global_df = df # Save to CSV before creating visualizations csv_file = save_to_csv(df) # Only create combined time series graph combined_fig = create_combined_time_series_graph(df) return combined_fig, csv_file def create_time_series_graph_per_agent(df): """Create a time series graph for each agent using Plotly""" # Get unique agents unique_agents = df['agent_id'].unique() if len(unique_agents) == 0: logger.error("No agent data to plot") fig = go.Figure() fig.add_annotation( text="No agent data available", x=0.5, y=0.5, showarrow=False, font=dict(size=20) ) return fig # Create a subplot figure for each agent fig = make_subplots(rows=len(unique_agents), cols=1, subplot_titles=[f"Agent: {df[df['agent_id'] == agent_id]['agent_name'].iloc[0]}" for agent_id in unique_agents], vertical_spacing=0.1) # Plot data for each agent for i, agent_id in enumerate(unique_agents): agent_data = df[df['agent_id'] == agent_id].copy() agent_name = agent_data['agent_name'].iloc[0] row = i + 1 # Add zero line to separate APR and Performance fig.add_shape( type="line", line=dict(dash="solid", width=1.5, color="black"), y0=0, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), row=row, col=1 ) # Add background colors fig.add_shape( type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0), y0=0, y1=1000, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), row=row, col=1, layer="below" ) fig.add_shape( type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0), y0=-1000, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), row=row, col=1, layer="below" ) # Create separate dataframes for different data types apr_data = agent_data[agent_data['metric_type'] == 'APR'] perf_data = agent_data[agent_data['metric_type'] == 'Performance'] # Sort all data by timestamp for the line plots combined_agent_data = agent_data.sort_values('timestamp') # Add main line connecting all points fig.add_trace( go.Scatter( x=combined_agent_data['timestamp'], y=combined_agent_data['apr'], mode='lines', line=dict(color='purple', width=2), name=f'{agent_name}', legendgroup=agent_name, showlegend=(i == 0), # Only show in legend once hovertemplate='Time: %{x}
Value: %{y:.2f}' ), row=row, col=1 ) # Add scatter points for APR values if not apr_data.empty: fig.add_trace( go.Scatter( x=apr_data['timestamp'], y=apr_data['apr'], mode='markers', marker=dict(color='blue', size=10, symbol='circle'), name='APR', legendgroup='APR', showlegend=(i == 0), hovertemplate='Time: %{x}
APR: %{y:.2f}' ), row=row, col=1 ) # Add scatter points for Performance values if not perf_data.empty: fig.add_trace( go.Scatter( x=perf_data['timestamp'], y=perf_data['apr'], mode='markers', marker=dict(color='red', size=10, symbol='square'), name='Performance', legendgroup='Performance', showlegend=(i == 0), hovertemplate='Time: %{x}
Performance: %{y:.2f}' ), row=row, col=1 ) # Update axes fig.update_xaxes(title_text="Time", row=row, col=1) fig.update_yaxes(title_text="Value", row=row, col=1, gridcolor='rgba(0,0,0,0.1)') # Update layout fig.update_layout( height=400 * len(unique_agents), width=1000, title_text="APR and Performance Values per Agent", template="plotly_white", legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1 ), margin=dict(r=20, l=20, t=30, b=20), hovermode="closest" ) # Save the figure (still useful for reference) graph_file = "modius_apr_per_agent_graph.html" fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False) # Also save as image for compatibility img_file = "modius_apr_per_agent_graph.png" fig.write_image(img_file) logger.info(f"Per-agent graph saved to {graph_file} and {img_file}") # Return the figure object for direct use in Gradio return fig def write_debug_info(df, fig): """Minimal debug info function""" try: # Just log minimal information logger.debug(f"Graph created with {len(df)} data points and {len(fig.data)} traces") return True except Exception as e: logger.error(f"Error writing debug info: {e}") return False def create_combined_time_series_graph(df): """Create a time series graph showing average APR values across all agents""" if len(df) == 0: logger.error("No data to plot combined graph") fig = go.Figure() fig.add_annotation( text="No data available", x=0.5, y=0.5, showarrow=False, font=dict(size=20) ) return fig # IMPORTANT: Force data types to ensure consistency df['apr'] = df['apr'].astype(float) # Ensure APR is float df['metric_type'] = df['metric_type'].astype(str) # Ensure metric_type is string # Set x-axis start date to April 17, 2025 as requested by user x_start_date = datetime(2025, 4, 17, 0, 0, 0) # CRITICAL: Log the exact dataframe we're using for plotting to help debug logger.info(f"Graph data - shape: {df.shape}, columns: {df.columns}") logger.info(f"Graph data - unique agents: {df['agent_name'].unique().tolist()}") logger.info("Graph data - all positive APR values only") logger.info(f"Graph data - min APR: {df['apr'].min()}, max APR: {df['apr'].max()}") # Export full dataframe to CSV for debugging debug_csv = "debug_graph_data.csv" df.to_csv(debug_csv) logger.info(f"Exported graph data to {debug_csv} for debugging") # Write detailed data report with open("debug_graph_data_report.txt", "w") as f: f.write("==== GRAPH DATA REPORT ====\n\n") f.write(f"Total data points: {len(df)}\n") f.write(f"Timestamp range: {df['timestamp'].min()} to {df['timestamp'].max()}\n\n") # Output per-agent details unique_agents = df['agent_id'].unique() f.write(f"Number of agents: {len(unique_agents)}\n\n") for agent_id in unique_agents: agent_data = df[df['agent_id'] == agent_id] agent_name = agent_data['agent_name'].iloc[0] f.write(f"== Agent: {agent_name} (ID: {agent_id}) ==\n") f.write(f" Total data points: {len(agent_data)}\n") apr_data = agent_data[agent_data['metric_type'] == 'APR'] f.write(f" APR data points: {len(apr_data)}\n") if not apr_data.empty: f.write(f" APR values: {apr_data['apr'].tolist()}\n") f.write(f" APR timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in apr_data['timestamp']]}\n") f.write("\n") logger.info("Generated detailed graph data report") # ENSURE THERE ARE NO CONFLICTING AXES OR TRACES # Create Plotly figure in a clean state fig = go.Figure() # Enable autoscaling instead of fixed ranges logger.info("Using autoscaling for axes ranges") # Add background shapes for APR and Performance regions min_time = df['timestamp'].min() max_time = df['timestamp'].max() # Add shape for positive APR region (above zero) fig.add_shape( type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0), y0=0, y1=100, # Use a fixed positive value x0=min_time, x1=max_time, layer="below" ) # Add shape for negative APR region (below zero) fig.add_shape( type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0), y0=-100, y1=0, # Use a fixed negative value x0=min_time, x1=max_time, layer="below" ) # Add zero line fig.add_shape( type="line", line=dict(dash="solid", width=1.5, color="black"), y0=0, y1=0, x0=min_time, x1=max_time ) # MODIFIED: Calculate average APR values across all agents for each timestamp # Filter for APR data only apr_data = df[df['metric_type'] == 'APR'].copy() # Filter out outliers (APR values above 200 or below -200) outlier_data = apr_data[(apr_data['apr'] > 200) | (apr_data['apr'] < -200)].copy() apr_data_filtered = apr_data[(apr_data['apr'] <= 200) & (apr_data['apr'] >= -200)].copy() # Log the outliers for better debugging if len(outlier_data) > 0: excluded_count = len(outlier_data) logger.info(f"Excluded {excluded_count} data points with outlier APR values (>200 or <-200)") # Group outliers by agent for detailed logging outlier_agents = outlier_data.groupby('agent_name') for agent_name, agent_outliers in outlier_agents: logger.info(f"Agent '{agent_name}' has {len(agent_outliers)} outlier values:") for idx, row in agent_outliers.iterrows(): logger.info(f" - APR: {row['apr']}, timestamp: {row['timestamp']}") # Use the filtered data for all subsequent operations apr_data = apr_data_filtered # Group by timestamp and calculate mean APR avg_apr_data = apr_data.groupby('timestamp')['apr'].mean().reset_index() # Sort by timestamp avg_apr_data = avg_apr_data.sort_values('timestamp') # Log the average APR data logger.info(f"Calculated average APR data with {len(avg_apr_data)} points") for idx, row in avg_apr_data.iterrows(): logger.info(f" Average point {idx}: timestamp={row['timestamp']}, avg_apr={row['apr']}") # Calculate moving average based on a time window (2 hours) # Sort data by timestamp apr_data_sorted = apr_data.sort_values('timestamp') # Create a new dataframe for the moving average avg_apr_data_with_ma = avg_apr_data.copy() avg_apr_data_with_ma['moving_avg'] = None # Initialize the moving average column # Define the time window for the moving average (3 days) time_window = pd.Timedelta(days=3) logger.info(f"Calculating moving average with time window of {time_window}") # Calculate moving averages: one for APR and one for adjusted APR avg_apr_data_with_ma['moving_avg'] = None # 3-day window for APR avg_apr_data_with_ma['adjusted_moving_avg'] = None # 3-day window for adjusted APR # Calculate the moving averages for each timestamp for i, row in avg_apr_data_with_ma.iterrows(): current_time = row['timestamp'] window_start = current_time - time_window # Get all data points within the 3-day time window window_data = apr_data_sorted[ (apr_data_sorted['timestamp'] >= window_start) & (apr_data_sorted['timestamp'] <= current_time) ] # Calculate the average APR for the 3-day time window if not window_data.empty: avg_apr_data_with_ma.at[i, 'moving_avg'] = window_data['apr'].mean() logger.debug(f"APR time window {window_start} to {current_time}: {len(window_data)} points, avg={window_data['apr'].mean()}") # Calculate adjusted APR moving average if data exists if 'adjusted_apr' in window_data.columns and window_data['adjusted_apr'].notna().any(): avg_apr_data_with_ma.at[i, 'adjusted_moving_avg'] = window_data['adjusted_apr'].mean() logger.debug(f"Adjusted APR time window {window_start} to {current_time}: {len(window_data)} points, avg={window_data['adjusted_apr'].mean()}") else: # If no data points in the window, use the current value avg_apr_data_with_ma.at[i, 'moving_avg'] = row['apr'] logger.debug(f"No data points in time window for {current_time}, using current value {row['apr']}") logger.info(f"Calculated time-based moving averages with {len(avg_apr_data_with_ma)} points") # Plot individual agent data points with agent names in hover, but limit display for scalability if not apr_data.empty: # Group by agent to use different colors for each agent unique_agents = apr_data['agent_name'].unique() colors = px.colors.qualitative.Plotly[:len(unique_agents)] # Create a color map for agents color_map = {agent: colors[i % len(colors)] for i, agent in enumerate(unique_agents)} # Calculate the total number of data points per agent to determine which are most active agent_counts = apr_data['agent_name'].value_counts() # Determine how many agents to show individually (limit to top 5 most active) MAX_VISIBLE_AGENTS = 5 top_agents = agent_counts.nlargest(min(MAX_VISIBLE_AGENTS, len(agent_counts))).index.tolist() logger.info(f"Showing {len(top_agents)} agents by default out of {len(unique_agents)} total agents") # Add data points for each agent, but only make top agents visible by default for agent_name in unique_agents: agent_data = apr_data[apr_data['agent_name'] == agent_name] # Explicitly convert to Python lists x_values = agent_data['timestamp'].tolist() y_values = agent_data['apr'].tolist() # Change default visibility to False to hide all agent data points is_visible = False # Add data points as markers for APR fig.add_trace( go.Scatter( x=x_values, y=y_values, mode='markers', # Only markers for original data marker=dict( color=color_map[agent_name], symbol='circle', size=10, line=dict(width=1, color='black') ), name=f'Agent: {agent_name} (APR)', hovertemplate='Time: %{x}
APR: %{y:.2f}
Agent: ' + agent_name + '', visible=is_visible # All agents hidden by default ) ) logger.info(f"Added APR data points for agent {agent_name} with {len(x_values)} points (visible: {is_visible})") # Add data points for adjusted APR if it exists if 'adjusted_apr' in agent_data.columns and agent_data['adjusted_apr'].notna().any(): x_values_adj = agent_data['timestamp'].tolist() y_values_adj = agent_data['adjusted_apr'].tolist() fig.add_trace( go.Scatter( x=x_values_adj, y=y_values_adj, mode='markers', # Only markers for original data marker=dict( color=color_map[agent_name], symbol='diamond', # Different symbol for adjusted APR size=10, line=dict(width=1, color='black') ), name=f'Agent: {agent_name} (Adjusted APR)', hovertemplate='Time: %{x}
Adjusted APR: %{y:.2f}
Agent: ' + agent_name + '', visible=is_visible # All agents hidden by default ) ) logger.info(f"Added Adjusted APR data points for agent {agent_name} with {len(x_values_adj)} points (visible: {is_visible})") # Add APR moving average as a smooth line x_values_ma = avg_apr_data_with_ma['timestamp'].tolist() y_values_ma = avg_apr_data_with_ma['moving_avg'].tolist() # Create hover template for the APR moving average line hover_data_apr = [] for idx, row in avg_apr_data_with_ma.iterrows(): timestamp = row['timestamp'] hover_data_apr.append( f"Time: {timestamp}
Avg APR (3d window): {row['moving_avg']:.2f}" ) fig.add_trace( go.Scatter( x=x_values_ma, y=y_values_ma, mode='lines', # Only lines for moving average line=dict(color='red', width=2), # Thinner line name='Average APR (3d window)', hovertext=hover_data_apr, hoverinfo='text', visible=True # Visible by default ) ) logger.info(f"Added 3-day moving average APR trace with {len(x_values_ma)} points") # Add adjusted APR moving average line if it exists if 'adjusted_moving_avg' in avg_apr_data_with_ma.columns and avg_apr_data_with_ma['adjusted_moving_avg'].notna().any(): y_values_adj_ma = avg_apr_data_with_ma['adjusted_moving_avg'].tolist() # Create hover template for the adjusted APR moving average line hover_data_adj = [] for idx, row in avg_apr_data_with_ma.iterrows(): timestamp = row['timestamp'] if pd.notna(row['adjusted_moving_avg']): hover_data_adj.append( f"Time: {timestamp}
Avg ETH Adjusted APR (3d window): {row['adjusted_moving_avg']:.2f}" ) else: hover_data_adj.append( f"Time: {timestamp}
Avg ETH Adjusted APR (3d window): N/A" ) fig.add_trace( go.Scatter( x=x_values_ma, y=y_values_adj_ma, mode='lines', # Only lines for moving average line=dict(color='green', width=4), # Thicker solid line for adjusted APR name='Average ETH Adjusted APR (3d window)', hovertext=hover_data_adj, hoverinfo='text', visible=True # Visible by default ) ) logger.info(f"Added 3-day moving average Adjusted APR trace with {len(x_values_ma)} points") # Removed cumulative APR as requested logger.info("Cumulative APR graph line has been removed as requested") # Update layout - use simple boolean values everywhere # Make chart responsive instead of fixed width fig.update_layout( title=dict( text="Modius Agents", font=dict( family="Arial, sans-serif", size=22, color="black", weight="bold" ) ), xaxis_title=None, # Remove x-axis title to use annotation instead yaxis_title=None, # Remove the y-axis title as we'll use annotations instead template="plotly_white", height=600, # Reduced height for better fit on smaller screens # Removed fixed width to enable responsiveness autosize=True, # Enable auto-sizing for responsiveness legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1, groupclick="toggleitem" ), margin=dict(r=30, l=120, t=40, b=50), # Increased bottom margin for x-axis title hovermode="closest" ) # Add annotations for y-axis regions fig.add_annotation( x=-0.08, # Position further from the y-axis to avoid overlapping with tick labels y=-25, # Middle of the negative region xref="paper", yref="y", text="Percent drawdown [%]", showarrow=False, font=dict(size=16, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size textangle=-90, # Rotate text to be vertical align="center" ) fig.add_annotation( x=-0.08, # Position further from the y-axis to avoid overlapping with tick labels y=50, # Middle of the positive region xref="paper", yref="y", text="Agent APR [%]", showarrow=False, font=dict(size=16, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size textangle=-90, # Rotate text to be vertical align="center" ) # Remove x-axis title annotation # fig.add_annotation( # x=0.5, # Center of the x-axis # y=-0.15, # Below the x-axis # xref="paper", # yref="paper", # text="Date", # showarrow=False, # font=dict(size=16, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size # align="center" # ) # Update layout for legend fig.update_layout( legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1, groupclick="toggleitem", font=dict( family="Arial, sans-serif", size=14, # Adjusted font size color="black", weight="bold" ) ) ) # Update y-axis with fixed range of -50 to +100 for psychological effect fig.update_yaxes( showgrid=True, gridwidth=1, gridcolor='rgba(0,0,0,0.1)', # Use fixed range instead of autoscaling autorange=False, # Disable autoscaling range=[-50, 100], # Set fixed range from -50 to +100 tickformat=".2f", # Format tick labels with 2 decimal places tickfont=dict(size=14, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size title=None # Remove the built-in axis title since we're using annotations ) # Update x-axis with better formatting and fixed range fig.update_xaxes( showgrid=True, gridwidth=1, gridcolor='rgba(0,0,0,0.1)', # Set fixed range with April 17 as start date autorange=False, # Disable autoscaling range=[x_start_date, max_time], # Set fixed range from April 17 to max date tickformat="%b %d", # Simplified date format without time tickangle=-30, # Angle the labels for better readability tickfont=dict(size=14, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size title=None # Remove built-in title to use annotation instead ) # SIMPLIFIED APPROACH: Do a direct plot without markers for comparison # This creates a simple, reliable fallback plot if the advanced one fails try: # Write detailed debug information before saving the figure write_debug_info(df, fig) # Save the figure (still useful for reference) graph_file = "modius_apr_combined_graph.html" fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False) # Also save as image for compatibility img_file = "modius_apr_combined_graph.png" try: fig.write_image(img_file) logger.info(f"Combined graph saved to {graph_file} and {img_file}") except Exception as e: logger.error(f"Error saving image: {e}") logger.info(f"Combined graph saved to {graph_file} only") # Return the figure object for direct use in Gradio return fig except Exception as e: # If the complex graph approach fails, create a simpler one logger.error(f"Error creating advanced graph: {e}") logger.info("Falling back to Simpler graph") # Create a simpler graph as fallback simple_fig = go.Figure() # Add zero line simple_fig.add_shape( type="line", line=dict(dash="solid", width=1.5, color="black"), y0=0, y1=0, x0=min_time, x1=max_time ) # Define colors for the fallback graph fallback_colors = px.colors.qualitative.Plotly # Simply plot the average APR data with moving average if not avg_apr_data.empty: # Sort by timestamp avg_apr_data = avg_apr_data.sort_values('timestamp') # Calculate both moving averages for the fallback graph avg_apr_data_with_ma = avg_apr_data.copy() avg_apr_data_with_ma['moving_avg'] = None # 2-hour window avg_apr_data_with_ma['infinite_avg'] = None # Infinite window # Define the time window (6 hours) time_window = pd.Timedelta(hours=6) # Calculate the moving averages for each timestamp for i, row in avg_apr_data_with_ma.iterrows(): current_time = row['timestamp'] window_start = current_time - time_window # Get all data points within the 2-hour time window window_data = apr_data[ (apr_data['timestamp'] >= window_start) & (apr_data['timestamp'] <= current_time) ] # Get all data points up to the current timestamp (infinite window) infinite_window_data = apr_data[ apr_data['timestamp'] <= current_time ] # Calculate the average APR for the 2-hour time window if not window_data.empty: avg_apr_data_with_ma.at[i, 'moving_avg'] = window_data['apr'].mean() else: # If no data points in the window, use the current value avg_apr_data_with_ma.at[i, 'moving_avg'] = row['apr'] # Calculate the average APR for the infinite window if not infinite_window_data.empty: avg_apr_data_with_ma.at[i, 'infinite_avg'] = infinite_window_data['apr'].mean() else: avg_apr_data_with_ma.at[i, 'infinite_avg'] = row['apr'] # Add data points for each agent, but only make top agents visible by default unique_agents = apr_data['agent_name'].unique() colors = px.colors.qualitative.Plotly[:len(unique_agents)] color_map = {agent: colors[i % len(colors)] for i, agent in enumerate(unique_agents)} # Calculate the total number of data points per agent agent_counts = apr_data['agent_name'].value_counts() # Determine how many agents to show individually (limit to top 5 most active) MAX_VISIBLE_AGENTS = 5 top_agents = agent_counts.nlargest(min(MAX_VISIBLE_AGENTS, len(agent_counts))).index.tolist() for agent_name in unique_agents: agent_data = apr_data[apr_data['agent_name'] == agent_name] # Determine if this agent should be visible by default is_visible = agent_name in top_agents # Add data points as markers simple_fig.add_trace( go.Scatter( x=agent_data['timestamp'], y=agent_data['apr'], mode='markers', name=f'Agent: {agent_name}', marker=dict( size=10, color=color_map[agent_name] ), hovertemplate='Time: %{x}
APR: %{y:.2f}
Agent: ' + agent_name + '', visible=is_visible # Only top agents visible by default ) ) # Add 2-hour moving average as a line simple_fig.add_trace( go.Scatter( x=avg_apr_data_with_ma['timestamp'], y=avg_apr_data_with_ma['moving_avg'], mode='lines', name='Average APR (6h window)', line=dict(width=2, color='red') # Thinner line ) ) # Add infinite window moving average as another line simple_fig.add_trace( go.Scatter( x=avg_apr_data_with_ma['timestamp'], y=avg_apr_data_with_ma['infinite_avg'], mode='lines', name='Cumulative Average APR (all data)', line=dict(width=4, color='green') # Thicker solid line ) ) # Simplified layout with adjusted y-axis range and increased size simple_fig.update_layout( title=dict( text="Modius Agents", font=dict( family="Arial, sans-serif", size=22, color="black", weight="bold" ) ), xaxis_title=None, # Remove x-axis title to use annotation instead yaxis_title=None, # Remove the y-axis title as we'll use annotations instead yaxis=dict( # No fixed range - let Plotly autoscale autorange=True, # Explicitly enable autoscaling tickformat=".2f", # Format tick labels with 2 decimal places tickfont=dict(size=12) # Larger font for tick labels ), height=600, # Reduced height for better fit # Removed fixed width to enable responsiveness autosize=True, # Enable auto-sizing for responsiveness template="plotly_white", # Use a cleaner template margin=dict(r=30, l=120, t=40, b=50) # Increased bottom margin for x-axis title ) # Add annotations for y-axis regions in the fallback graph simple_fig.add_annotation( x=-0.08, # Position further from the y-axis to avoid overlapping with tick labels y=-25, # Middle of the negative region xref="paper", yref="y", text="Percent drawdown [%]", showarrow=False, font=dict(size=14, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size textangle=-90, # Rotate text to be vertical align="center" ) simple_fig.add_annotation( x=-0.08, # Position further from the y-axis to avoid overlapping with tick labels y=50, # Middle of the positive region xref="paper", yref="y", text="Agent APR [%]", showarrow=False, font=dict(size=14, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size textangle=-90, # Rotate text to be vertical align="center" ) # Remove x-axis title annotation # simple_fig.add_annotation( # x=0.5, # Center of the x-axis # y=-0.15, # Below the x-axis # xref="paper", # yref="paper", # text="Date", # showarrow=False, # font=dict(size=14, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size # align="center" # ) # Update legend font for fallback graph simple_fig.update_layout( legend=dict( font=dict( family="Arial, sans-serif", size=14, # Adjusted font size color="black", weight="bold" ) ) ) # Apply fixed range to the x-axis for the fallback graph simple_fig.update_xaxes( autorange=False, # Disable autoscaling range=[x_start_date, max_time], # Set fixed range from April 17 tickformat="%b %d", # Simplified date format without time tickangle=-30, tickfont=dict(size=14, family="Arial, sans-serif", color="black", weight="bold"), # Adjusted font size title=None # Remove built-in title to use annotation instead ) # Update y-axis tick font for fallback graph simple_fig.update_yaxes( tickfont=dict(size=14, family="Arial, sans-serif", color="black", weight="bold") # Adjusted font size ) # Add a note about hidden agents if there are more than MAX_VISIBLE_AGENTS if len(unique_agents) > MAX_VISIBLE_AGENTS: simple_fig.add_annotation( text=f"Note: Only showing top {MAX_VISIBLE_AGENTS} agents by default. Toggle others in legend.", xref="paper", yref="paper", x=0.5, y=1.05, showarrow=False, font=dict(size=12, color="gray"), align="center" ) # Return the simple figure return simple_fig def save_to_csv(df): """Save the APR data DataFrame to a CSV file and return the file path""" if df.empty: logger.error("No APR data to save to CSV") return None # Define the CSV file path csv_file = "modius_apr_values.csv" # Save to CSV df.to_csv(csv_file, index=False) logger.info(f"APR data saved to {csv_file}") # Also generate a statistics CSV file stats_df = generate_statistics_from_data(df) stats_csv = "modius_apr_statistics.csv" stats_df.to_csv(stats_csv, index=False) logger.info(f"Statistics saved to {stats_csv}") # Log detailed statistics about adjusted APR if 'adjusted_apr' in df.columns and df['adjusted_apr'].notna().any(): adjusted_stats = stats_df[stats_df['avg_adjusted_apr'].notna()] logger.info(f"Agents with adjusted APR data: {len(adjusted_stats)} out of {len(stats_df)}") for _, row in adjusted_stats.iterrows(): if row['agent_id'] != 'ALL': # Skip the overall stats row logger.info(f"Agent {row['agent_name']} adjusted APR stats: avg={row['avg_adjusted_apr']:.2f}, min={row['min_adjusted_apr']:.2f}, max={row['max_adjusted_apr']:.2f}") # Log overall adjusted APR stats overall_row = stats_df[stats_df['agent_id'] == 'ALL'] if not overall_row.empty and pd.notna(overall_row['avg_adjusted_apr'].iloc[0]): logger.info(f"Overall adjusted APR stats: avg={overall_row['avg_adjusted_apr'].iloc[0]:.2f}, min={overall_row['min_adjusted_apr'].iloc[0]:.2f}, max={overall_row['max_adjusted_apr'].iloc[0]:.2f}") return csv_file def generate_statistics_from_data(df): """Generate statistics from the APR data""" if df.empty: return pd.DataFrame() # Get unique agents unique_agents = df['agent_id'].unique() stats_list = [] # Generate per-agent statistics for agent_id in unique_agents: agent_data = df[df['agent_id'] == agent_id] agent_name = agent_data['agent_name'].iloc[0] # APR statistics apr_data = agent_data[agent_data['metric_type'] == 'APR'] real_apr = apr_data[apr_data['is_dummy'] == False] # Performance statistics perf_data = agent_data[agent_data['metric_type'] == 'Performance'] real_perf = perf_data[perf_data['is_dummy'] == False] # Check if adjusted_apr exists and has non-null values has_adjusted_apr = 'adjusted_apr' in apr_data.columns and apr_data['adjusted_apr'].notna().any() stats = { 'agent_id': agent_id, 'agent_name': agent_name, 'total_points': len(agent_data), 'apr_points': len(apr_data), 'performance_points': len(perf_data), 'real_apr_points': len(real_apr), 'real_performance_points': len(real_perf), 'avg_apr': apr_data['apr'].mean() if not apr_data.empty else None, 'avg_performance': perf_data['apr'].mean() if not perf_data.empty else None, 'max_apr': apr_data['apr'].max() if not apr_data.empty else None, 'min_apr': apr_data['apr'].min() if not apr_data.empty else None, 'avg_adjusted_apr': apr_data['adjusted_apr'].mean() if has_adjusted_apr else None, 'max_adjusted_apr': apr_data['adjusted_apr'].max() if has_adjusted_apr else None, 'min_adjusted_apr': apr_data['adjusted_apr'].min() if has_adjusted_apr else None, 'latest_timestamp': agent_data['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not agent_data.empty else None } stats_list.append(stats) # Generate overall statistics apr_only = df[df['metric_type'] == 'APR'] perf_only = df[df['metric_type'] == 'Performance'] # Check if adjusted_apr exists and has non-null values for overall stats has_adjusted_apr_overall = 'adjusted_apr' in apr_only.columns and apr_only['adjusted_apr'].notna().any() overall_stats = { 'agent_id': 'ALL', 'agent_name': 'All Agents', 'total_points': len(df), 'apr_points': len(apr_only), 'performance_points': len(perf_only), 'real_apr_points': len(apr_only[apr_only['is_dummy'] == False]), 'real_performance_points': len(perf_only[perf_only['is_dummy'] == False]), 'avg_apr': apr_only['apr'].mean() if not apr_only.empty else None, 'avg_performance': perf_only['apr'].mean() if not perf_only.empty else None, 'max_apr': apr_only['apr'].max() if not apr_only.empty else None, 'min_apr': apr_only['apr'].min() if not apr_only.empty else None, 'avg_adjusted_apr': apr_only['adjusted_apr'].mean() if has_adjusted_apr_overall else None, 'max_adjusted_apr': apr_only['adjusted_apr'].max() if has_adjusted_apr_overall else None, 'min_adjusted_apr': apr_only['adjusted_apr'].min() if has_adjusted_apr_overall else None, 'latest_timestamp': df['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not df.empty else None } stats_list.append(overall_stats) return pd.DataFrame(stats_list) # Create dummy functions for the commented out imports def create_transcation_visualizations(): """Dummy implementation that returns a placeholder graph""" fig = go.Figure() fig.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) return fig def create_active_agents_visualizations(): """Dummy implementation that returns a placeholder graph""" fig = go.Figure() fig.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) return fig # Comment out the blockchain connection code """ # Load environment variables from .env file # RPC URLs OPTIMISM_RPC_URL = os.getenv('OPTIMISM_RPC_URL') MODE_RPC_URL = os.getenv('MODE_RPC_URL') # Initialize Web3 instances web3_instances = { 'optimism': Web3(Web3.HTTPProvider(OPTIMISM_RPC_URL)), 'mode': Web3(Web3.HTTPProvider(MODE_RPC_URL)) } # Contract addresses for service registries contract_addresses = { 'optimism': '0x3d77596beb0f130a4415df3D2D8232B3d3D31e44', 'mode': '0x3C1fF68f5aa342D296d4DEe4Bb1cACCA912D95fE' } # Load the ABI from the provided JSON file with open('./contracts/service_registry_abi.json', 'r') as abi_file: contract_abi = json.load(abi_file) # Create the contract instances service_registries = { chain_name: web3.eth.contract(address=contract_addresses[chain_name], abi=contract_abi) for chain_name, web3 in web3_instances.items() } # Check if connections are successful for chain_name, web3_instance in web3_instances.items(): if not web3_instance.is_connected(): raise Exception(f"Failed to connect to the {chain_name.capitalize()} network.") else: print(f"Successfully connected to the {chain_name.capitalize()} network.") """ # Dummy blockchain functions to replace the commented ones def get_transfers(integrator: str, wallet: str) -> str: """Dummy function that returns an empty result""" return {"transfers": []} def fetch_and_aggregate_transactions(): """Dummy function that returns empty data""" return [], {} # Function to parse the transaction data and prepare it for visualization def process_transactions_and_agents(data): """Dummy function that returns empty dataframes""" df_transactions = pd.DataFrame() df_agents = pd.DataFrame(columns=['date', 'agent_count']) df_agents_weekly = pd.DataFrame() return df_transactions, df_agents, df_agents_weekly # Function to create visualizations based on the metrics def create_visualizations(): """ # Commenting out the original visualization code temporarily for debugging transactions_data = fetch_and_aggregate_transactions() df_transactions, df_agents, df_agents_weekly = process_transactions_and_agents(transactions_data) # Fetch daily value locked data df_tvl = pd.read_csv('daily_value_locked.csv') # Calculate total value locked per chain per day df_tvl["total_value_locked_usd"] = df_tvl["amount0_usd"] + df_tvl["amount1_usd"] df_tvl_daily = df_tvl.groupby(["date", "chain_name"])["total_value_locked_usd"].sum().reset_index() df_tvl_daily['date'] = pd.to_datetime(df_tvl_daily['date']) # Filter out dates with zero total value locked df_tvl_daily = df_tvl_daily[df_tvl_daily["total_value_locked_usd"] > 0] chain_name_map = { "mode": "Mode", "base": "Base", "ethereum": "Ethereum", "optimism": "Optimism" } df_tvl_daily["chain_name"] = df_tvl_daily["chain_name"].map(chain_name_map) # Plot total value locked fig_tvl = px.bar( df_tvl_daily, x="date", y="total_value_locked_usd", color="chain_name", opacity=0.7, title="Total Volume Invested in Pools in Different Chains Daily", labels={"date": "Date","chain_name": "Transaction Chain", "total_value_locked_usd": "Total Volume Invested (USD)"}, barmode='stack', color_discrete_map={ "Mode": "orange", "Base": "purple", "Ethereum": "darkgreen", "Optimism": "blue" } ) fig_tvl.update_layout( xaxis_title="Date", yaxis=dict(tickmode='linear', tick0=0, dtick=4), xaxis=dict( tickmode='array', tickvals=df_tvl_daily['date'], ticktext=df_tvl_daily['date'].dt.strftime('%b %d'), tickangle=-45, ), bargap=0.6, # Increase gap between bar groups (0-1) bargroupgap=0.1, # Decrease gap between bars in a group (0-1) height=600, width=1200, # Specify width to prevent bars from being too wide showlegend=True, template='plotly_white' ) fig_tvl.update_xaxes(tickformat="%b %d") chain_name_map = { 10: "Optimism", 8453: "Base", 1: "Ethereum", 34443: "Mode" } df_transactions["sending_chain"] = df_transactions["sending_chain"].map(chain_name_map) df_transactions["receiving_chain"] = df_transactions["receiving_chain"].map(chain_name_map) df_transactions["sending_chain"] = df_transactions["sending_chain"].astype(str) df_transactions["receiving_chain"] = df_transactions["receiving_chain"].astype(str) df_transactions['date'] = pd.to_datetime(df_transactions['date']) df_transactions["is_swap"] = df_transactions.apply(lambda x: x["sending_chain"] == x["receiving_chain"], axis=1) swaps_per_chain = df_transactions[df_transactions["is_swap"]].groupby(["date", "sending_chain"]).size().reset_index(name="swap_count") fig_swaps_chain = px.bar( swaps_per_chain, x="date", y="swap_count", color="sending_chain", title="Chain Daily Activity: Swaps", labels={"sending_chain": "Transaction Chain", "swap_count": "Daily Swap Nr"}, barmode="stack", opacity=0.7, color_discrete_map={ "Optimism": "blue", "Ethereum": "darkgreen", "Base": "purple", "Mode": "orange" } ) fig_swaps_chain.update_layout( xaxis_title="Date", yaxis_title="Daily Swap Count", yaxis=dict(tickmode='linear', tick0=0, dtick=1), xaxis=dict( tickmode='array', tickvals=[d for d in swaps_per_chain['date']], ticktext=[d.strftime('%m-%d') for d in swaps_per_chain['date']], tickangle=-45, ), bargap=0.6, bargroupgap=0.1, height=600, width=1200, margin=dict(l=50, r=50, t=50, b=50), showlegend=True, legend=dict( yanchor="top", y=0.99, xanchor="right", x=0.99 ), template='plotly_white' ) fig_swaps_chain.update_xaxes(tickformat="%m-%d") df_transactions["is_bridge"] = df_transactions.apply(lambda x: x["sending_chain"] != x["receiving_chain"], axis=1) bridges_per_chain = df_transactions[df_transactions["is_bridge"]].groupby(["date", "sending_chain"]).size().reset_index(name="bridge_count") fig_bridges_chain = px.bar( bridges_per_chain, x="date", y="bridge_count", color="sending_chain", title="Chain Daily Activity: Bridges", labels={"sending_chain": "Transaction Chain", "bridge_count": "Daily Bridge Nr"}, barmode="stack", opacity=0.7, color_discrete_map={ "Optimism": "blue", "Ethereum": "darkgreen", "Base": "purple", "Mode": "orange" } ) fig_bridges_chain.update_layout( xaxis_title="Date", yaxis_title="Daily Bridge Count", yaxis=dict(tickmode='linear', tick0=0, dtick=1), xaxis=dict( tickmode='array', tickvals=[d for d in bridges_per_chain['date']], ticktext=[d.strftime('%m-%d') for d in bridges_per_chain['date']], tickangle=-45, ), bargap=0.6, bargroupgap=0.1, height=600, width=1200, margin=dict(l=50, r=50, t=50, b=50), showlegend=True, legend=dict( yanchor="top", y=0.99, xanchor="right", x=0.99 ), template='plotly_white' ) fig_bridges_chain.update_xaxes(tickformat="%m-%d") df_agents['date'] = pd.to_datetime(df_agents['date']) daily_agents_df = df_agents.groupby('date').agg({'agent_count': 'sum'}).reset_index() daily_agents_df.rename(columns={'agent_count': 'daily_agent_count'}, inplace=True) # Sort by date to ensure proper running total calculation daily_agents_df = daily_agents_df.sort_values('date') # Create week column daily_agents_df['week'] = daily_agents_df['date'].dt.to_period('W').apply(lambda r: r.start_time) # Calculate running total within each week daily_agents_df['running_weekly_total'] = daily_agents_df.groupby('week')['daily_agent_count'].cumsum() # Create final merged dataframe weekly_merged_df = daily_agents_df.copy() adjustment_date = pd.to_datetime('2024-11-15') weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'daily_agent_count'] -= 1 weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'running_weekly_total'] -= 1 fig_agents_registered = go.Figure(data=[ go.Bar( name='Daily nr of Registered Agents', x=weekly_merged_df['date'].dt.strftime("%b %d"), y=weekly_merged_df['daily_agent_count'], opacity=0.7, marker_color='blue' ), go.Bar( name='Weekly Nr of Registered Agents', x=weekly_merged_df['date'].dt.strftime("%b %d"), y=weekly_merged_df['running_weekly_total'], opacity=0.7, marker_color='purple' ) ]) fig_agents_registered.update_layout( xaxis_title='Date', yaxis_title='Number of Agents', title="Nr of Agents Registered", barmode='group', yaxis=dict(tickmode='linear', tick0=0, dtick=1), xaxis=dict( categoryorder='array', categoryarray=weekly_merged_df['date'].dt.strftime("%b %d"), tickangle=-45 ), bargap=0.3, height=600, width=1200, showlegend=True, legend=dict( yanchor="top", xanchor="right", ), template='plotly_white', ) return fig_swaps_chain, fig_bridges_chain, fig_agents_registered,fig_tvl """ # Placeholder figures for testing fig_swaps_chain = go.Figure() fig_swaps_chain.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) fig_bridges_chain = go.Figure() fig_bridges_chain.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) fig_agents_registered = go.Figure() fig_agents_registered.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) fig_tvl = go.Figure() fig_tvl.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl # Modify dashboard function to make the plot container responsive def dashboard(): with gr.Blocks() as demo: gr.Markdown("# Average Modius Agent Performance") # APR Metrics tab - the only tab with gr.Tab("APR Metrics"): with gr.Column(): refresh_btn = gr.Button("Refresh APR Data") # Create container for plotly figure with responsive sizing with gr.Column(): combined_graph = gr.Plot(label="APR for All Agents", elem_id="responsive_plot") # Create compact toggle controls at the bottom of the graph with gr.Row(visible=True): gr.Markdown("##### Toggle Graph Lines", elem_id="toggle_title") with gr.Row(): with gr.Column(): with gr.Row(elem_id="toggle_container"): with gr.Column(scale=1, min_width=150): apr_toggle = gr.Checkbox(label="APR Average", value=True, elem_id="apr_toggle") with gr.Column(scale=1, min_width=150): adjusted_apr_toggle = gr.Checkbox(label="ETH Adjusted APR Average", value=True, elem_id="adjusted_apr_toggle") # Add custom CSS for making the plot responsive gr.HTML(""" """) # Function to update the graph def update_apr_graph(show_apr_ma=True, show_adjusted_apr_ma=True): # Generate visualization and get figure object directly try: combined_fig, _ = generate_apr_visualizations() # Update visibility of traces based on toggle values for i, trace in enumerate(combined_fig.data): # Check if this is a moving average trace if trace.name == 'Average APR (3d window)': trace.visible = show_apr_ma elif trace.name == 'Average ETH Adjusted APR (3d window)': trace.visible = show_adjusted_apr_ma return combined_fig except Exception as e: logger.exception("Error generating APR visualization") # Create error figure error_fig = go.Figure() error_fig.add_annotation( text=f"Error: {str(e)}", x=0.5, y=0.5, showarrow=False, font=dict(size=15, color="red") ) return error_fig # Initialize the graph on load with a placeholder placeholder_fig = go.Figure() placeholder_fig.add_annotation( text="Click 'Refresh APR Data' to load APR graph", x=0.5, y=0.5, showarrow=False, font=dict(size=15) ) combined_graph.value = placeholder_fig # Function to update the graph based on toggle states def update_graph_with_toggles(apr_visible, adjusted_apr_visible): return update_apr_graph(apr_visible, adjusted_apr_visible) # Function to update the graph without parameters (for refresh button) def refresh_graph(): return update_apr_graph(apr_toggle.value, adjusted_apr_toggle.value) # Set up the button click event refresh_btn.click(fn=refresh_graph, inputs=None, outputs=[combined_graph]) # Set up the toggle switch events apr_toggle.change( fn=update_graph_with_toggles, inputs=[apr_toggle, adjusted_apr_toggle], outputs=[combined_graph] ) adjusted_apr_toggle.change( fn=update_graph_with_toggles, inputs=[apr_toggle, adjusted_apr_toggle], outputs=[combined_graph] ) return demo # Launch the dashboard if __name__ == "__main__": dashboard().launch()