import requests import pandas as pd import gradio as gr import plotly.graph_objects as go import plotly.express as px from plotly.subplots import make_subplots from datetime import datetime, timedelta import json # Commenting out blockchain-related imports that cause loading issues # from web3 import Web3 import os import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates import random import logging from typing import List, Dict, Any # Comment out the import for now and replace with dummy functions # from app_trans_new import create_transcation_visualizations,create_active_agents_visualizations # APR visualization functions integrated directly logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") logger = logging.getLogger(__name__) # Global variable to store the data for reuse global_df = None # Configuration API_BASE_URL = "https://afmdb.autonolas.tech" def get_agent_type_by_name(type_name: str) -> Dict[str, Any]: """Get agent type by name""" response = requests.get(f"{API_BASE_URL}/api/agent-types/name/{type_name}") if response.status_code == 404: logger.error(f"Agent type '{type_name}' not found") return None response.raise_for_status() return response.json() def get_attribute_definition_by_name(attr_name: str) -> Dict[str, Any]: """Get attribute definition by name""" response = requests.get(f"{API_BASE_URL}/api/attributes/name/{attr_name}") if response.status_code == 404: logger.error(f"Attribute definition '{attr_name}' not found") return None response.raise_for_status() return response.json() def get_agents_by_type(type_id: int) -> List[Dict[str, Any]]: """Get all agents of a specific type""" response = requests.get(f"{API_BASE_URL}/api/agent-types/{type_id}/agents/") if response.status_code == 404: logger.error(f"No agents found for type ID {type_id}") return [] response.raise_for_status() return response.json() def get_attribute_values_by_type_and_attr(agents: List[Dict[str, Any]], attr_def_id: int) -> List[Dict[str, Any]]: """Get all attribute values for a specific attribute definition across all agents of a given list""" all_attributes = [] # For each agent, get their attributes and filter for the one we want for agent in agents: agent_id = agent["agent_id"] # Call the /api/agents/{agent_id}/attributes/ endpoint response = requests.get(f"{API_BASE_URL}/api/agents/{agent_id}/attributes/", params={"limit": 1000}) if response.status_code == 404: logger.error(f"No attributes found for agent ID {agent_id}") continue try: response.raise_for_status() agent_attrs = response.json() # Filter for the specific attribute definition ID filtered_attrs = [attr for attr in agent_attrs if attr.get("attr_def_id") == attr_def_id] all_attributes.extend(filtered_attrs) except requests.exceptions.RequestException as e: logger.error(f"Error fetching attributes for agent ID {agent_id}: {e}") return all_attributes def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str: """Get agent name from agent ID""" for agent in agents: if agent["agent_id"] == agent_id: return agent["agent_name"] return "Unknown" def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]: """Extract APR value and timestamp from JSON value""" try: # The APR value is stored in the json_value field if attr["json_value"] is None: return {"apr": None, "timestamp": None, "agent_id": attr["agent_id"], "is_dummy": False} # If json_value is a string, parse it if isinstance(attr["json_value"], str): json_data = json.loads(attr["json_value"]) else: json_data = attr["json_value"] apr = json_data.get("apr") timestamp = json_data.get("timestamp") # Convert timestamp to datetime if it exists timestamp_dt = None if timestamp: timestamp_dt = datetime.fromtimestamp(timestamp) return {"apr": apr, "timestamp": timestamp_dt, "agent_id": attr["agent_id"], "is_dummy": False} except (json.JSONDecodeError, KeyError, TypeError) as e: logger.error(f"Error parsing JSON value: {e} for agent_id: {attr.get('agent_id')}") return {"apr": None, "timestamp": None, "agent_id": attr["agent_id"], "is_dummy": False} def fetch_apr_data_from_db(): """ Fetch APR data from database using the API. """ global global_df try: # Step 1: Find the Modius agent type modius_type = get_agent_type_by_name("Modius") if not modius_type: logger.error("Modius agent type not found, using placeholder data") global_df = pd.DataFrame([]) return global_df type_id = modius_type["type_id"] # Step 2: Find the APR attribute definition apr_attr_def = get_attribute_definition_by_name("APR") if not apr_attr_def: logger.error("APR attribute definition not found, using placeholder data") global_df = pd.DataFrame([]) return global_df attr_def_id = apr_attr_def["attr_def_id"] # Step 3: Get all agents of type Modius modius_agents = get_agents_by_type(type_id) if not modius_agents: logger.error("No agents of type 'Modius' found") global_df = pd.DataFrame([]) return global_df # Step 4: Fetch all APR values for Modius agents apr_attributes = get_attribute_values_by_type_and_attr(modius_agents, attr_def_id) if not apr_attributes: logger.error("No APR values found for 'Modius' agents") global_df = pd.DataFrame([]) return global_df # Step 5: Extract APR data apr_data_list = [] for attr in apr_attributes: apr_data = extract_apr_value(attr) if apr_data["apr"] is not None and apr_data["timestamp"] is not None: # Get agent name agent_name = get_agent_name(attr["agent_id"], modius_agents) # Add agent name to the data apr_data["agent_name"] = agent_name # Add is_dummy flag (all real data) apr_data["is_dummy"] = False # Mark negative values as "Performance" metrics if apr_data["apr"] < 0: apr_data["metric_type"] = "Performance" else: apr_data["metric_type"] = "APR" apr_data_list.append(apr_data) # Convert list of dictionaries to DataFrame if not apr_data_list: logger.error("No valid APR data extracted") global_df = pd.DataFrame([]) return global_df global_df = pd.DataFrame(apr_data_list) return global_df except requests.exceptions.RequestException as e: logger.error(f"API request error: {e}") global_df = pd.DataFrame([]) return global_df except Exception as e: logger.error(f"Error fetching APR data: {e}") global_df = pd.DataFrame([]) return global_df def generate_apr_visualizations(): """Generate APR visualizations with real data only (no dummy data)""" global global_df # Fetch data from database df = fetch_apr_data_from_db() # If we got no data at all, return placeholder figures if df.empty: logger.info("No APR data available. Using fallback visualization.") # Create empty visualizations with a message using Plotly fig = go.Figure() fig.add_annotation( x=0.5, y=0.5, text="No APR data available", font=dict(size=20), showarrow=False ) fig.update_layout( xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False) ) # Save as static files for reference fig.write_html("modius_apr_per_agent_graph.html") fig.write_image("modius_apr_per_agent_graph.png") fig.write_html("modius_apr_combined_graph.html") fig.write_image("modius_apr_combined_graph.png") csv_file = None return fig, fig, csv_file # No longer generating dummy data # Set global_df for access by other functions global_df = df print(global_df.head()) print(df.head()) # Save to CSV before creating visualizations csv_file = save_to_csv(df) # Create per-agent time series graph (returns figure object) per_agent_fig = create_time_series_graph_per_agent(df) # Create combined time series graph (returns figure object) combined_fig = create_combined_time_series_graph(df) return per_agent_fig, combined_fig, csv_file def create_time_series_graph_per_agent(df): """Create a time series graph for each agent using Plotly""" # Get unique agents unique_agents = df['agent_id'].unique() if len(unique_agents) == 0: logger.error("No agent data to plot") fig = go.Figure() fig.add_annotation( text="No agent data available", x=0.5, y=0.5, showarrow=False, font=dict(size=20) ) return fig # Create a subplot figure for each agent fig = make_subplots(rows=len(unique_agents), cols=1, subplot_titles=[f"Agent: {df[df['agent_id'] == agent_id]['agent_name'].iloc[0]}" for agent_id in unique_agents], vertical_spacing=0.1) # Plot data for each agent for i, agent_id in enumerate(unique_agents): agent_data = df[df['agent_id'] == agent_id].copy() agent_name = agent_data['agent_name'].iloc[0] row = i + 1 # Add zero line to separate APR and Performance fig.add_shape( type="line", line=dict(dash="solid", width=1.5, color="black"), y0=0, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), row=row, col=1 ) # Add background colors fig.add_shape( type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0), y0=0, y1=1000, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), row=row, col=1, layer="below" ) fig.add_shape( type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0), y0=-1000, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), row=row, col=1, layer="below" ) # Create separate dataframes for different data types apr_data = agent_data[agent_data['metric_type'] == 'APR'] perf_data = agent_data[agent_data['metric_type'] == 'Performance'] # Sort all data by timestamp for the line plots combined_agent_data = agent_data.sort_values('timestamp') # Add main line connecting all points fig.add_trace( go.Scatter( x=combined_agent_data['timestamp'], y=combined_agent_data['apr'], mode='lines', line=dict(color='purple', width=2), name=f'{agent_name}', legendgroup=agent_name, showlegend=(i == 0), # Only show in legend once hovertemplate='Time: %{x}
Value: %{y:.2f}' ), row=row, col=1 ) # Add scatter points for APR values if not apr_data.empty: fig.add_trace( go.Scatter( x=apr_data['timestamp'], y=apr_data['apr'], mode='markers', marker=dict(color='blue', size=10, symbol='circle'), name='APR', legendgroup='APR', showlegend=(i == 0), hovertemplate='Time: %{x}
APR: %{y:.2f}' ), row=row, col=1 ) # Add scatter points for Performance values if not perf_data.empty: fig.add_trace( go.Scatter( x=perf_data['timestamp'], y=perf_data['apr'], mode='markers', marker=dict(color='red', size=10, symbol='square'), name='Performance', legendgroup='Performance', showlegend=(i == 0), hovertemplate='Time: %{x}
Performance: %{y:.2f}' ), row=row, col=1 ) # Update axes fig.update_xaxes(title_text="Time", row=row, col=1) fig.update_yaxes(title_text="Value", row=row, col=1, gridcolor='rgba(0,0,0,0.1)') # Update layout fig.update_layout( height=400 * len(unique_agents), width=1000, title_text="APR and Performance Values per Agent", template="plotly_white", legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1 ), margin=dict(r=20, l=20, t=30, b=20), hovermode="closest" ) # Save the figure (still useful for reference) graph_file = "modius_apr_per_agent_graph.html" fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False) # Also save as image for compatibility img_file = "modius_apr_per_agent_graph.png" fig.write_image(img_file) logger.info(f"Per-agent graph saved to {graph_file} and {img_file}") # Return the figure object for direct use in Gradio return fig def create_combined_time_series_graph(df): """Create a combined time series graph for all agents using Plotly""" if len(df) == 0: logger.error("No data to plot combined graph") fig = go.Figure() fig.add_annotation( text="No data available", x=0.5, y=0.5, showarrow=False, font=dict(size=20) ) return fig # Create Plotly figure fig = go.Figure() # Get unique agents unique_agents = df['agent_id'].unique() # Define a color scale for different agents colors = px.colors.qualitative.Plotly[:len(unique_agents)] # Add background shapes for APR and Performance regions min_time = df['timestamp'].min() max_time = df['timestamp'].max() # Add shape for APR region (above zero) fig.add_shape( type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0), y0=0, y1=1000, x0=min_time, x1=max_time, layer="below" ) # Add shape for Performance region (below zero) fig.add_shape( type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0), y0=-1000, y1=0, x0=min_time, x1=max_time, layer="below" ) # Add zero line fig.add_shape( type="line", line=dict(dash="solid", width=1.5, color="black"), y0=0, y1=0, x0=min_time, x1=max_time ) # Add data for each agent for i, agent_id in enumerate(unique_agents): agent_data = df[df['agent_id'] == agent_id].copy() agent_name = agent_data['agent_name'].iloc[0] color = colors[i % len(colors)] # Sort the data by timestamp agent_data = agent_data.sort_values('timestamp') print("agent_data_combined",agent_data) # Add the combined line for both APR and Performance fig.add_trace( go.Scatter( x=agent_data['timestamp'], y=agent_data['apr'], mode='lines', line=dict(color=color, width=2), name=f'{agent_name}', legendgroup=agent_name, hovertemplate='Time: %{x}
Value: %{y:.2f}
Agent: ' + agent_name + '' ) ) # Add scatter points for APR values apr_data = agent_data[agent_data['metric_type'] == 'APR'] print("apr_data_combined",apr_data) if not apr_data.empty: fig.add_trace( go.Scatter( x=apr_data['timestamp'], y=apr_data['apr'], mode='markers', marker=dict(color=color, symbol='circle', size=8), name=f'{agent_name} APR', legendgroup=agent_name, showlegend=False, hovertemplate='Time: %{x}
APR: %{y:.2f}
Agent: ' + agent_name + '' ) ) # Add scatter points for Performance values perf_data = agent_data[agent_data['metric_type'] == 'Performance'] print("perf_data_combined",perf_data) if not perf_data.empty: fig.add_trace( go.Scatter( x=perf_data['timestamp'], y=perf_data['apr'], mode='markers', marker=dict(color=color, symbol='square', size=8), name=f'{agent_name} Perf', legendgroup=agent_name, showlegend=False, hovertemplate='Time: %{x}
Performance: %{y:.2f}
Agent: ' + agent_name + '' ) ) # Update layout fig.update_layout( title="APR and Performance Values for All Agents", xaxis_title="Time", yaxis_title="Value", template="plotly_white", height=600, width=1000, legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1, groupclick="toggleitem" ), margin=dict(r=20, l=20, t=30, b=20), hovermode="closest" ) # Update axes fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='rgba(0,0,0,0.1)') fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='rgba(0,0,0,0.1)') # Save the figure (still useful for reference) graph_file = "modius_apr_combined_graph.html" fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False) # Also save as image for compatibility img_file = "modius_apr_combined_graph.png" fig.write_image(img_file) logger.info(f"Combined graph saved to {graph_file} and {img_file}") # Return the figure object for direct use in Gradio return fig def save_to_csv(df): """Save the APR data DataFrame to a CSV file and return the file path""" if df.empty: logger.error("No APR data to save to CSV") return None # Define the CSV file path csv_file = "modius_apr_values.csv" # Save to CSV df.to_csv(csv_file, index=False) logger.info(f"APR data saved to {csv_file}") # Also generate a statistics CSV file stats_df = generate_statistics_from_data(df) stats_csv = "modius_apr_statistics.csv" stats_df.to_csv(stats_csv, index=False) logger.info(f"Statistics saved to {stats_csv}") return csv_file def generate_statistics_from_data(df): """Generate statistics from the APR data""" if df.empty: return pd.DataFrame() # Get unique agents unique_agents = df['agent_id'].unique() stats_list = [] # Generate per-agent statistics for agent_id in unique_agents: agent_data = df[df['agent_id'] == agent_id] agent_name = agent_data['agent_name'].iloc[0] # APR statistics apr_data = agent_data[agent_data['metric_type'] == 'APR'] real_apr = apr_data[apr_data['is_dummy'] == False] # Performance statistics perf_data = agent_data[agent_data['metric_type'] == 'Performance'] real_perf = perf_data[perf_data['is_dummy'] == False] stats = { 'agent_id': agent_id, 'agent_name': agent_name, 'total_points': len(agent_data), 'apr_points': len(apr_data), 'performance_points': len(perf_data), 'real_apr_points': len(real_apr), 'real_performance_points': len(real_perf), 'avg_apr': apr_data['apr'].mean() if not apr_data.empty else None, 'avg_performance': perf_data['apr'].mean() if not perf_data.empty else None, 'max_apr': apr_data['apr'].max() if not apr_data.empty else None, 'min_apr': apr_data['apr'].min() if not apr_data.empty else None, 'latest_timestamp': agent_data['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not agent_data.empty else None } stats_list.append(stats) # Generate overall statistics apr_only = df[df['metric_type'] == 'APR'] perf_only = df[df['metric_type'] == 'Performance'] overall_stats = { 'agent_id': 'ALL', 'agent_name': 'All Agents', 'total_points': len(df), 'apr_points': len(apr_only), 'performance_points': len(perf_only), 'real_apr_points': len(apr_only[apr_only['is_dummy'] == False]), 'real_performance_points': len(perf_only[perf_only['is_dummy'] == False]), 'avg_apr': apr_only['apr'].mean() if not apr_only.empty else None, 'avg_performance': perf_only['apr'].mean() if not perf_only.empty else None, 'max_apr': apr_only['apr'].max() if not apr_only.empty else None, 'min_apr': apr_only['apr'].min() if not apr_only.empty else None, 'latest_timestamp': df['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not df.empty else None } stats_list.append(overall_stats) return pd.DataFrame(stats_list) # Create dummy functions for the commented out imports def create_transcation_visualizations(): """Dummy implementation that returns a placeholder graph""" fig = go.Figure() fig.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) return fig def create_active_agents_visualizations(): """Dummy implementation that returns a placeholder graph""" fig = go.Figure() fig.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) return fig # Comment out the blockchain connection code """ # Load environment variables from .env file # RPC URLs OPTIMISM_RPC_URL = os.getenv('OPTIMISM_RPC_URL') MODE_RPC_URL = os.getenv('MODE_RPC_URL') # Initialize Web3 instances web3_instances = { 'optimism': Web3(Web3.HTTPProvider(OPTIMISM_RPC_URL)), 'mode': Web3(Web3.HTTPProvider(MODE_RPC_URL)) } # Contract addresses for service registries contract_addresses = { 'optimism': '0x3d77596beb0f130a4415df3D2D8232B3d3D31e44', 'mode': '0x3C1fF68f5aa342D296d4DEe4Bb1cACCA912D95fE' } # Load the ABI from the provided JSON file with open('./contracts/service_registry_abi.json', 'r') as abi_file: contract_abi = json.load(abi_file) # Create the contract instances service_registries = { chain_name: web3.eth.contract(address=contract_addresses[chain_name], abi=contract_abi) for chain_name, web3 in web3_instances.items() } # Check if connections are successful for chain_name, web3_instance in web3_instances.items(): if not web3_instance.is_connected(): raise Exception(f"Failed to connect to the {chain_name.capitalize()} network.") else: print(f"Successfully connected to the {chain_name.capitalize()} network.") """ # Dummy blockchain functions to replace the commented ones def get_transfers(integrator: str, wallet: str) -> str: """Dummy function that returns an empty result""" return {"transfers": []} def fetch_and_aggregate_transactions(): """Dummy function that returns empty data""" return [], {} # Function to parse the transaction data and prepare it for visualization def process_transactions_and_agents(data): """Dummy function that returns empty dataframes""" df_transactions = pd.DataFrame() df_agents = pd.DataFrame(columns=['date', 'agent_count']) df_agents_weekly = pd.DataFrame() return df_transactions, df_agents, df_agents_weekly # Function to create visualizations based on the metrics def create_visualizations(): """ # Commenting out the original visualization code temporarily for debugging transactions_data = fetch_and_aggregate_transactions() df_transactions, df_agents, df_agents_weekly = process_transactions_and_agents(transactions_data) # Fetch daily value locked data df_tvl = pd.read_csv('daily_value_locked.csv') # Calculate total value locked per chain per day df_tvl["total_value_locked_usd"] = df_tvl["amount0_usd"] + df_tvl["amount1_usd"] df_tvl_daily = df_tvl.groupby(["date", "chain_name"])["total_value_locked_usd"].sum().reset_index() df_tvl_daily['date'] = pd.to_datetime(df_tvl_daily['date']) # Filter out dates with zero total value locked df_tvl_daily = df_tvl_daily[df_tvl_daily["total_value_locked_usd"] > 0] chain_name_map = { "mode": "Mode", "base": "Base", "ethereum": "Ethereum", "optimism": "Optimism" } df_tvl_daily["chain_name"] = df_tvl_daily["chain_name"].map(chain_name_map) # Plot total value locked fig_tvl = px.bar( df_tvl_daily, x="date", y="total_value_locked_usd", color="chain_name", opacity=0.7, title="Total Volume Invested in Pools in Different Chains Daily", labels={"date": "Date","chain_name": "Transaction Chain", "total_value_locked_usd": "Total Volume Invested (USD)"}, barmode='stack', color_discrete_map={ "Mode": "orange", "Base": "purple", "Ethereum": "darkgreen", "Optimism": "blue" } ) fig_tvl.update_layout( xaxis_title="Date", yaxis=dict(tickmode='linear', tick0=0, dtick=4), xaxis=dict( tickmode='array', tickvals=df_tvl_daily['date'], ticktext=df_tvl_daily['date'].dt.strftime('%b %d'), tickangle=-45, ), bargap=0.6, # Increase gap between bar groups (0-1) bargroupgap=0.1, # Decrease gap between bars in a group (0-1) height=600, width=1200, # Specify width to prevent bars from being too wide showlegend=True, template='plotly_white' ) fig_tvl.update_xaxes(tickformat="%b %d") chain_name_map = { 10: "Optimism", 8453: "Base", 1: "Ethereum", 34443: "Mode" } df_transactions["sending_chain"] = df_transactions["sending_chain"].map(chain_name_map) df_transactions["receiving_chain"] = df_transactions["receiving_chain"].map(chain_name_map) df_transactions["sending_chain"] = df_transactions["sending_chain"].astype(str) df_transactions["receiving_chain"] = df_transactions["receiving_chain"].astype(str) df_transactions['date'] = pd.to_datetime(df_transactions['date']) df_transactions["is_swap"] = df_transactions.apply(lambda x: x["sending_chain"] == x["receiving_chain"], axis=1) swaps_per_chain = df_transactions[df_transactions["is_swap"]].groupby(["date", "sending_chain"]).size().reset_index(name="swap_count") fig_swaps_chain = px.bar( swaps_per_chain, x="date", y="swap_count", color="sending_chain", title="Chain Daily Activity: Swaps", labels={"sending_chain": "Transaction Chain", "swap_count": "Daily Swap Nr"}, barmode="stack", opacity=0.7, color_discrete_map={ "Optimism": "blue", "Ethereum": "darkgreen", "Base": "purple", "Mode": "orange" } ) fig_swaps_chain.update_layout( xaxis_title="Date", yaxis_title="Daily Swap Count", yaxis=dict(tickmode='linear', tick0=0, dtick=1), xaxis=dict( tickmode='array', tickvals=[d for d in swaps_per_chain['date']], ticktext=[d.strftime('%m-%d') for d in swaps_per_chain['date']], tickangle=-45, ), bargap=0.6, bargroupgap=0.1, height=600, width=1200, margin=dict(l=50, r=50, t=50, b=50), showlegend=True, legend=dict( yanchor="top", y=0.99, xanchor="right", x=0.99 ), template='plotly_white' ) fig_swaps_chain.update_xaxes(tickformat="%m-%d") df_transactions["is_bridge"] = df_transactions.apply(lambda x: x["sending_chain"] != x["receiving_chain"], axis=1) bridges_per_chain = df_transactions[df_transactions["is_bridge"]].groupby(["date", "sending_chain"]).size().reset_index(name="bridge_count") fig_bridges_chain = px.bar( bridges_per_chain, x="date", y="bridge_count", color="sending_chain", title="Chain Daily Activity: Bridges", labels={"sending_chain": "Transaction Chain", "bridge_count": "Daily Bridge Nr"}, barmode="stack", opacity=0.7, color_discrete_map={ "Optimism": "blue", "Ethereum": "darkgreen", "Base": "purple", "Mode": "orange" } ) fig_bridges_chain.update_layout( xaxis_title="Date", yaxis_title="Daily Bridge Count", yaxis=dict(tickmode='linear', tick0=0, dtick=1), xaxis=dict( tickmode='array', tickvals=[d for d in bridges_per_chain['date']], ticktext=[d.strftime('%m-%d') for d in bridges_per_chain['date']], tickangle=-45, ), bargap=0.6, bargroupgap=0.1, height=600, width=1200, margin=dict(l=50, r=50, t=50, b=50), showlegend=True, legend=dict( yanchor="top", y=0.99, xanchor="right", x=0.99 ), template='plotly_white' ) fig_bridges_chain.update_xaxes(tickformat="%m-%d") df_agents['date'] = pd.to_datetime(df_agents['date']) daily_agents_df = df_agents.groupby('date').agg({'agent_count': 'sum'}).reset_index() daily_agents_df.rename(columns={'agent_count': 'daily_agent_count'}, inplace=True) # Sort by date to ensure proper running total calculation daily_agents_df = daily_agents_df.sort_values('date') # Create week column daily_agents_df['week'] = daily_agents_df['date'].dt.to_period('W').apply(lambda r: r.start_time) # Calculate running total within each week daily_agents_df['running_weekly_total'] = daily_agents_df.groupby('week')['daily_agent_count'].cumsum() # Create final merged dataframe weekly_merged_df = daily_agents_df.copy() adjustment_date = pd.to_datetime('2024-11-15') weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'daily_agent_count'] -= 1 weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'running_weekly_total'] -= 1 fig_agents_registered = go.Figure(data=[ go.Bar( name='Daily nr of Registered Agents', x=weekly_merged_df['date'].dt.strftime("%b %d"), y=weekly_merged_df['daily_agent_count'], opacity=0.7, marker_color='blue' ), go.Bar( name='Weekly Nr of Registered Agents', x=weekly_merged_df['date'].dt.strftime("%b %d"), y=weekly_merged_df['running_weekly_total'], opacity=0.7, marker_color='purple' ) ]) fig_agents_registered.update_layout( xaxis_title='Date', yaxis_title='Number of Agents', title="Nr of Agents Registered", barmode='group', yaxis=dict(tickmode='linear', tick0=0, dtick=1), xaxis=dict( categoryorder='array', categoryarray=weekly_merged_df['date'].dt.strftime("%b %d"), tickangle=-45 ), bargap=0.3, height=600, width=1200, showlegend=True, legend=dict( yanchor="top", xanchor="right", ), template='plotly_white', ) return fig_swaps_chain, fig_bridges_chain, fig_agents_registered,fig_tvl """ # Placeholder figures for testing fig_swaps_chain = go.Figure() fig_swaps_chain.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) fig_bridges_chain = go.Figure() fig_bridges_chain.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) fig_agents_registered = go.Figure() fig_agents_registered.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) fig_tvl = go.Figure() fig_tvl.add_annotation( text="Blockchain data loading disabled - placeholder visualization", x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False, font=dict(size=20) ) return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl # Gradio interface def dashboard(): with gr.Blocks() as demo: gr.Markdown("# Valory APR Metrics") # APR Metrics tab - the only tab with gr.Tab("APR Metrics"): with gr.Column(): refresh_btn = gr.Button("Refresh APR Data") # Create containers for plotly figures per_agent_graph = gr.Plot(label="APR Per Agent") combined_graph = gr.Plot(label="Combined APR (All Agents)") # Function to update both graphs def update_apr_graphs(): # Generate visualizations and get figure objects directly per_agent_fig, combined_fig, _ = generate_apr_visualizations() return per_agent_fig, combined_fig # Set up the button click event refresh_btn.click( fn=update_apr_graphs, inputs=[], outputs=[per_agent_graph, combined_graph] ) # Initialize the graphs on load # We'll use placeholder figures initially import plotly.graph_objects as go placeholder_fig = go.Figure() placeholder_fig.add_annotation( text="Click 'Refresh APR Data' to load APR graphs", x=0.5, y=0.5, showarrow=False, font=dict(size=15) ) per_agent_graph.value = placeholder_fig combined_graph.value = placeholder_fig return demo # Launch the dashboard if __name__ == "__main__": dashboard().launch()