import requests
import pandas as pd
import gradio as gr
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from datetime import datetime, timedelta
import json
# Commenting out blockchain-related imports that cause loading issues
# from web3 import Web3
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import random
import logging
from typing import Any, Dict, List, Optional
# Comment out the import for now and replace with dummy functions
# from app_trans_new import create_transcation_visualizations,create_active_agents_visualizations
# APR visualization functions integrated directly
# Set up more detailed logging
logging.basicConfig(
    level=logging.DEBUG,  # DEBUG captures the most detailed logs
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[
logging.FileHandler("app_debug.log"), # Log to file for persistence
logging.StreamHandler() # Also log to console
]
)
logger = logging.getLogger(__name__)
# Log the startup information
logger.info("============= APPLICATION STARTING =============")
logger.info(f"Running from directory: {os.getcwd()}")
# Global variable to store the data for reuse
global_df = None
# Configuration
API_BASE_URL = "https://afmdb.autonolas.tech"
logger.info(f"Using API endpoint: {API_BASE_URL}")
def get_agent_type_by_name(type_name: str) -> Optional[Dict[str, Any]]:
    """Get an agent type by name; returns None if not found or on error"""
url = f"{API_BASE_URL}/api/agent-types/name/{type_name}"
logger.debug(f"Calling API: {url}")
try:
        response = requests.get(url, timeout=30)  # avoid hanging on a stalled connection
logger.debug(f"Response status: {response.status_code}")
if response.status_code == 404:
logger.error(f"Agent type '{type_name}' not found")
return None
response.raise_for_status()
result = response.json()
logger.debug(f"Agent type response: {result}")
return result
except Exception as e:
logger.error(f"Error in get_agent_type_by_name: {e}")
return None
def get_attribute_definition_by_name(attr_name: str) -> Optional[Dict[str, Any]]:
    """Get an attribute definition by name; returns None if not found or on error"""
url = f"{API_BASE_URL}/api/attributes/name/{attr_name}"
logger.debug(f"Calling API: {url}")
try:
        response = requests.get(url, timeout=30)  # avoid hanging on a stalled connection
logger.debug(f"Response status: {response.status_code}")
if response.status_code == 404:
logger.error(f"Attribute definition '{attr_name}' not found")
return None
response.raise_for_status()
result = response.json()
logger.debug(f"Attribute definition response: {result}")
return result
except Exception as e:
logger.error(f"Error in get_attribute_definition_by_name: {e}")
return None
def get_agents_by_type(type_id: int) -> List[Dict[str, Any]]:
"""Get all agents of a specific type"""
url = f"{API_BASE_URL}/api/agent-types/{type_id}/agents/"
logger.debug(f"Calling API: {url}")
try:
        response = requests.get(url, timeout=30)  # avoid hanging on a stalled connection
logger.debug(f"Response status: {response.status_code}")
if response.status_code == 404:
logger.error(f"No agents found for type ID {type_id}")
return []
response.raise_for_status()
result = response.json()
logger.debug(f"Agents count: {len(result)}")
logger.debug(f"First few agents: {result[:2] if result else []}")
return result
except Exception as e:
logger.error(f"Error in get_agents_by_type: {e}")
return []
def get_attribute_values_by_type_and_attr(agents: List[Dict[str, Any]], attr_def_id: int) -> List[Dict[str, Any]]:
"""Get all attribute values for a specific attribute definition across all agents of a given list"""
all_attributes = []
logger.debug(f"Getting attributes for {len(agents)} agents with attr_def_id: {attr_def_id}")
# For each agent, get their attributes and filter for the one we want
for agent in agents:
agent_id = agent["agent_id"]
# Call the /api/agents/{agent_id}/attributes/ endpoint
url = f"{API_BASE_URL}/api/agents/{agent_id}/attributes/"
logger.debug(f"Calling API for agent {agent_id}: {url}")
try:
            response = requests.get(url, params={"limit": 1000}, timeout=30)  # avoid hanging on a stalled connection
if response.status_code == 404:
logger.error(f"No attributes found for agent ID {agent_id}")
continue
response.raise_for_status()
agent_attrs = response.json()
logger.debug(f"Agent {agent_id} has {len(agent_attrs)} attributes")
# Filter for the specific attribute definition ID
filtered_attrs = [attr for attr in agent_attrs if attr.get("attr_def_id") == attr_def_id]
logger.debug(f"Agent {agent_id} has {len(filtered_attrs)} APR attributes")
if filtered_attrs:
logger.debug(f"Sample attribute for agent {agent_id}: {filtered_attrs[0]}")
all_attributes.extend(filtered_attrs)
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching attributes for agent ID {agent_id}: {e}")
logger.info(f"Total APR attributes found across all agents: {len(all_attributes)}")
return all_attributes
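# Note: the per-agent request above is capped at limit=1000. If an agent can
# accumulate more attributes than that, paging would be needed. A sketch,
# assuming the API follows the usual skip/limit convention (unverified):
#
#   def fetch_all_agent_attributes(agent_id: int, page_size: int = 1000) -> List[Dict[str, Any]]:
#       results, skip = [], 0
#       while True:
#           resp = requests.get(f"{API_BASE_URL}/api/agents/{agent_id}/attributes/",
#                               params={"limit": page_size, "skip": skip}, timeout=30)
#           resp.raise_for_status()
#           page = resp.json()
#           results.extend(page)
#           if len(page) < page_size:
#               break
#           skip += page_size
#       return results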
def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str:
"""Get agent name from agent ID"""
for agent in agents:
if agent["agent_id"] == agent_id:
return agent["agent_name"]
return "Unknown"
def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]:
"""Extract APR value and timestamp from JSON value"""
try:
agent_id = attr.get("agent_id", "unknown")
logger.debug(f"Extracting APR value for agent {agent_id}")
# The APR value is stored in the json_value field
if attr["json_value"] is None:
logger.debug(f"Agent {agent_id}: json_value is None")
return {"apr": None, "timestamp": None, "agent_id": agent_id, "is_dummy": False}
# If json_value is a string, parse it
if isinstance(attr["json_value"], str):
logger.debug(f"Agent {agent_id}: json_value is string, parsing")
json_data = json.loads(attr["json_value"])
else:
json_data = attr["json_value"]
apr = json_data.get("apr")
timestamp = json_data.get("timestamp")
logger.debug(f"Agent {agent_id}: Raw APR value: {apr}, timestamp: {timestamp}")
# Convert timestamp to datetime if it exists
timestamp_dt = None
if timestamp:
timestamp_dt = datetime.fromtimestamp(timestamp)
result = {"apr": apr, "timestamp": timestamp_dt, "agent_id": agent_id, "is_dummy": False}
logger.debug(f"Agent {agent_id}: Extracted result: {result}")
return result
except (json.JSONDecodeError, KeyError, TypeError) as e:
logger.error(f"Error parsing JSON value: {e} for agent_id: {attr.get('agent_id')}")
logger.error(f"Problematic json_value: {attr.get('json_value')}")
return {"apr": None, "timestamp": None, "agent_id": attr.get('agent_id'), "is_dummy": False}
def fetch_apr_data_from_db():
"""
Fetch APR data from database using the API.
"""
global global_df
logger.info("==== Starting APR data fetch ====")
try:
# Step 1: Find the Modius agent type
logger.info("Finding Modius agent type")
modius_type = get_agent_type_by_name("Modius")
if not modius_type:
logger.error("Modius agent type not found, using placeholder data")
global_df = pd.DataFrame([])
return global_df
type_id = modius_type["type_id"]
logger.info(f"Found Modius agent type with ID: {type_id}")
# Step 2: Find the APR attribute definition
logger.info("Finding APR attribute definition")
apr_attr_def = get_attribute_definition_by_name("APR")
if not apr_attr_def:
logger.error("APR attribute definition not found, using placeholder data")
global_df = pd.DataFrame([])
return global_df
attr_def_id = apr_attr_def["attr_def_id"]
logger.info(f"Found APR attribute definition with ID: {attr_def_id}")
# Step 3: Get all agents of type Modius
logger.info(f"Getting all agents of type Modius (type_id: {type_id})")
modius_agents = get_agents_by_type(type_id)
if not modius_agents:
logger.error("No agents of type 'Modius' found")
global_df = pd.DataFrame([])
return global_df
logger.info(f"Found {len(modius_agents)} Modius agents")
logger.debug(f"Modius agents: {[{'agent_id': a['agent_id'], 'agent_name': a['agent_name']} for a in modius_agents]}")
# Step 4: Fetch all APR values for Modius agents
logger.info(f"Fetching APR values for all Modius agents (attr_def_id: {attr_def_id})")
apr_attributes = get_attribute_values_by_type_and_attr(modius_agents, attr_def_id)
if not apr_attributes:
logger.error("No APR values found for 'Modius' agents")
global_df = pd.DataFrame([])
return global_df
logger.info(f"Found {len(apr_attributes)} APR attributes total")
# Step 5: Extract APR data
logger.info("Extracting APR data from attributes")
apr_data_list = []
for attr in apr_attributes:
apr_data = extract_apr_value(attr)
if apr_data["apr"] is not None and apr_data["timestamp"] is not None:
# Get agent name
agent_name = get_agent_name(attr["agent_id"], modius_agents)
# Add agent name to the data
apr_data["agent_name"] = agent_name
# Add is_dummy flag (all real data)
apr_data["is_dummy"] = False
# Include all APR values (including negative ones) EXCEPT zero and -100
if apr_data["apr"] != 0 and apr_data["apr"] != -100:
apr_data["metric_type"] = "APR"
logger.debug(f"Agent {agent_name} ({attr['agent_id']}): APR value: {apr_data['apr']}")
# Add to the data list
apr_data_list.append(apr_data)
else:
# Log that we're skipping zero or -100 values
logger.debug(f"Skipping value for agent {agent_name} ({attr['agent_id']}): {apr_data['apr']} (zero or -100)")
# Convert list of dictionaries to DataFrame
if not apr_data_list:
logger.error("No valid APR data extracted")
global_df = pd.DataFrame([])
return global_df
global_df = pd.DataFrame(apr_data_list)
# Log the resulting dataframe
logger.info(f"Created DataFrame with {len(global_df)} rows")
logger.info(f"DataFrame columns: {global_df.columns.tolist()}")
logger.info(f"APR statistics: min={global_df['apr'].min()}, max={global_df['apr'].max()}, mean={global_df['apr'].mean()}")
# All values are APR type (excluding zero and -100 values)
logger.info("All values are APR type (excluding zero and -100 values)")
logger.info(f"Agents count: {global_df['agent_name'].value_counts().to_dict()}")
# Log the entire dataframe for debugging
logger.debug("Final DataFrame contents:")
for idx, row in global_df.iterrows():
logger.debug(f"Row {idx}: {row.to_dict()}")
return global_df
except requests.exceptions.RequestException as e:
logger.error(f"API request error: {e}")
global_df = pd.DataFrame([])
return global_df
except Exception as e:
logger.error(f"Error fetching APR data: {e}")
logger.exception("Exception details:")
global_df = pd.DataFrame([])
return global_df
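# fetch_apr_data_from_db hits the API on every call. If the dashboard gets
# refreshed frequently, a small time-based cache would reduce API load. A sketch
# (hypothetical helper, not wired in):
#
#   _apr_cache: Dict[str, Any] = {"time": None, "df": None}
#
#   def fetch_apr_data_cached(ttl_seconds: int = 300) -> pd.DataFrame:
#       now = datetime.now()
#       if _apr_cache["time"] is not None and (now - _apr_cache["time"]).total_seconds() < ttl_seconds:
#           return _apr_cache["df"]
#       df = fetch_apr_data_from_db()
#       _apr_cache.update(time=now, df=df)
#       return df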
def generate_apr_visualizations():
"""Generate APR visualizations with real data only (no dummy data)"""
global global_df
# Fetch data from database
df = fetch_apr_data_from_db()
# If we got no data at all, return placeholder figures
if df.empty:
logger.info("No APR data available. Using fallback visualization.")
# Create empty visualizations with a message using Plotly
fig = go.Figure()
fig.add_annotation(
x=0.5, y=0.5,
text="No APR data available",
font=dict(size=20),
showarrow=False
)
fig.update_layout(
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)
)
        # Save static copies for reference (image export needs kaleido and may fail)
        fig.write_html("modius_apr_combined_graph.html")
        try:
            fig.write_image("modius_apr_combined_graph.png")
        except Exception as e:
            logger.error(f"Error saving placeholder image: {e}")
csv_file = None
return fig, csv_file
# No longer generating dummy data
# Set global_df for access by other functions
global_df = df
# Save to CSV before creating visualizations
csv_file = save_to_csv(df)
# Only create combined time series graph
combined_fig = create_combined_time_series_graph(df)
return combined_fig, csv_file
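# generate_apr_visualizations returns the Plotly figure plus the CSV path; a
# typical standalone use outside Gradio:
#
#   fig, csv_path = generate_apr_visualizations()
#   fig.show()  # or hand the figure to gr.Plot, as dashboard() does below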
def create_time_series_graph_per_agent(df):
"""Create a time series graph for each agent using Plotly"""
# Get unique agents
unique_agents = df['agent_id'].unique()
if len(unique_agents) == 0:
logger.error("No agent data to plot")
fig = go.Figure()
fig.add_annotation(
text="No agent data available",
x=0.5, y=0.5,
showarrow=False, font=dict(size=20)
)
return fig
# Create a subplot figure for each agent
fig = make_subplots(rows=len(unique_agents), cols=1,
subplot_titles=[f"Agent: {df[df['agent_id'] == agent_id]['agent_name'].iloc[0]}"
for agent_id in unique_agents],
vertical_spacing=0.1)
# Plot data for each agent
for i, agent_id in enumerate(unique_agents):
agent_data = df[df['agent_id'] == agent_id].copy()
agent_name = agent_data['agent_name'].iloc[0]
row = i + 1
# Add zero line to separate APR and Performance
fig.add_shape(
type="line", line=dict(dash="solid", width=1.5, color="black"),
y0=0, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(),
row=row, col=1
)
# Add background colors
fig.add_shape(
type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0),
y0=0, y1=1000, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(),
row=row, col=1, layer="below"
)
fig.add_shape(
type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0),
y0=-1000, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(),
row=row, col=1, layer="below"
)
# Create separate dataframes for different data types
apr_data = agent_data[agent_data['metric_type'] == 'APR']
perf_data = agent_data[agent_data['metric_type'] == 'Performance']
# Sort all data by timestamp for the line plots
combined_agent_data = agent_data.sort_values('timestamp')
# Add main line connecting all points
fig.add_trace(
go.Scatter(
x=combined_agent_data['timestamp'],
y=combined_agent_data['apr'],
mode='lines',
line=dict(color='purple', width=2),
name=f'{agent_name}',
legendgroup=agent_name,
showlegend=(i == 0), # Only show in legend once
hovertemplate='Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
),
row=row, col=1
)
# Add scatter points for APR values
if not apr_data.empty:
fig.add_trace(
go.Scatter(
x=apr_data['timestamp'],
y=apr_data['apr'],
mode='markers',
marker=dict(color='blue', size=10, symbol='circle'),
name='APR',
legendgroup='APR',
showlegend=(i == 0),
hovertemplate='Time: %{x}<br>APR: %{y:.2f}<extra></extra>'
),
row=row, col=1
)
# Add scatter points for Performance values
if not perf_data.empty:
fig.add_trace(
go.Scatter(
x=perf_data['timestamp'],
y=perf_data['apr'],
mode='markers',
marker=dict(color='red', size=10, symbol='square'),
name='Performance',
legendgroup='Performance',
showlegend=(i == 0),
hovertemplate='Time: %{x}<br>Performance: %{y:.2f}<extra></extra>'
),
row=row, col=1
)
# Update axes
fig.update_xaxes(title_text="Time", row=row, col=1)
fig.update_yaxes(title_text="Value", row=row, col=1, gridcolor='rgba(0,0,0,0.1)')
# Update layout
fig.update_layout(
height=400 * len(unique_agents),
width=1000,
title_text="APR and Performance Values per Agent",
template="plotly_white",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
margin=dict(r=20, l=20, t=30, b=20),
hovermode="closest"
)
# Save the figure (still useful for reference)
graph_file = "modius_apr_per_agent_graph.html"
fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False)
# Also save as image for compatibility
img_file = "modius_apr_per_agent_graph.png"
fig.write_image(img_file)
logger.info(f"Per-agent graph saved to {graph_file} and {img_file}")
# Return the figure object for direct use in Gradio
return fig
def write_debug_info(df, fig):
"""Write detailed debug information to logs for troubleshooting"""
try:
logger.info("==== GRAPH DEBUG INFORMATION ====")
logger.info(f"Total data points: {len(df)}")
logger.info(f"DataFrame columns: {df.columns.tolist()}")
logger.info("Data types:")
for col in df.columns:
logger.info(f" {col}: {df[col].dtype}")
# Output sample data points
logger.info("Sample data (up to 5 rows):")
sample_df = df.head(5)
for idx, row in sample_df.iterrows():
logger.info(f" Row {idx}: {row.to_dict()}")
# Output Plotly figure structure
logger.info("Plotly Figure Structure:")
logger.info(f" Number of traces: {len(fig.data)}")
for i, trace in enumerate(fig.data):
logger.info(f" Trace {i}:")
logger.info(f" Type: {trace.type}")
logger.info(f" Mode: {trace.mode if hasattr(trace, 'mode') else 'N/A'}")
logger.info(f" Name: {trace.name}")
# Only log first few values to avoid overwhelming logs
if hasattr(trace, 'x') and trace.x is not None and len(trace.x) > 0:
x_sample = str(trace.x[:2])
logger.info(f" X data sample (first 2): {x_sample}")
if hasattr(trace, 'y') and trace.y is not None and len(trace.y) > 0:
y_sample = str(trace.y[:2])
logger.info(f" Y data sample (first 2): {y_sample}")
if hasattr(trace, 'line') and hasattr(trace.line, 'color'):
logger.info(f" Line color: {trace.line.color}")
if hasattr(trace, 'line') and hasattr(trace.line, 'width'):
logger.info(f" Line width: {trace.line.width}")
        # Check environment (os is already imported at module level)
        import sys
        import platform
logger.info("Environment Information:")
logger.info(f" Platform: {platform.platform()}")
logger.info(f" Python version: {sys.version}")
logger.info(f" Running in Docker: {'DOCKER_CONTAINER' in os.environ}")
logger.info(f" Running in HF Space: {'SPACE_ID' in os.environ}")
# Plotly version
import plotly
logger.info(f" Plotly version: {plotly.__version__}")
logger.info("End of debug info")
return True
except Exception as e:
logger.error(f"Error writing debug info: {e}")
return False
def create_combined_time_series_graph(df):
"""Create a combined time series graph for all agents using Plotly"""
if len(df) == 0:
logger.error("No data to plot combined graph")
fig = go.Figure()
fig.add_annotation(
text="No data available",
x=0.5, y=0.5,
showarrow=False, font=dict(size=20)
)
return fig
# IMPORTANT: Force data types to ensure consistency
df['apr'] = df['apr'].astype(float) # Ensure APR is float
df['metric_type'] = df['metric_type'].astype(str) # Ensure metric_type is string
# CRITICAL: Log the exact dataframe we're using for plotting to help debug
logger.info(f"Graph data - shape: {df.shape}, columns: {df.columns}")
logger.info(f"Graph data - unique agents: {df['agent_name'].unique().tolist()}")
logger.info("Graph data - all positive APR values only")
logger.info(f"Graph data - min APR: {df['apr'].min()}, max APR: {df['apr'].max()}")
# Export full dataframe to CSV for debugging
debug_csv = "debug_graph_data.csv"
df.to_csv(debug_csv)
logger.info(f"Exported graph data to {debug_csv} for debugging")
# Write detailed data report
with open("debug_graph_data_report.txt", "w") as f:
f.write("==== GRAPH DATA REPORT ====\n\n")
f.write(f"Total data points: {len(df)}\n")
f.write(f"Timestamp range: {df['timestamp'].min()} to {df['timestamp'].max()}\n\n")
# Output per-agent details
unique_agents = df['agent_id'].unique()
f.write(f"Number of agents: {len(unique_agents)}\n\n")
for agent_id in unique_agents:
agent_data = df[df['agent_id'] == agent_id]
agent_name = agent_data['agent_name'].iloc[0]
f.write(f"== Agent: {agent_name} (ID: {agent_id}) ==\n")
f.write(f" Total data points: {len(agent_data)}\n")
apr_data = agent_data[agent_data['metric_type'] == 'APR']
f.write(f" APR data points: {len(apr_data)}\n")
if not apr_data.empty:
f.write(f" APR values: {apr_data['apr'].tolist()}\n")
f.write(f" APR timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in apr_data['timestamp']]}\n")
f.write("\n")
logger.info("Generated detailed graph data report")
# ENSURE THERE ARE NO CONFLICTING AXES OR TRACES
# Create Plotly figure in a clean state
fig = go.Figure()
# Get unique agents
unique_agents = df['agent_id'].unique()
colors = px.colors.qualitative.Plotly[:len(unique_agents)]
# Update y-axis range to include negative values
min_apr = min(df['apr'].min() * 1.1, -10) # Add 10% padding, minimum of -10
max_apr = max(df['apr'].max() * 1.1, 10) # Add 10% padding, minimum of 10
# Add background shapes for APR and Performance regions
min_time = df['timestamp'].min()
max_time = df['timestamp'].max()
# Add shape for positive APR region (above zero)
fig.add_shape(
type="rect",
fillcolor="rgba(230, 243, 255, 0.3)",
line=dict(width=0),
y0=0, y1=max_apr,
x0=min_time, x1=max_time,
layer="below"
)
# Add shape for negative APR region (below zero)
fig.add_shape(
type="rect",
fillcolor="rgba(255, 230, 230, 0.3)",
line=dict(width=0),
y0=min_apr, y1=0,
x0=min_time, x1=max_time,
layer="below"
)
# Add zero line
fig.add_shape(
type="line",
line=dict(dash="solid", width=1.5, color="black"),
y0=0, y1=0,
x0=min_time, x1=max_time
)
    # Only APR values need to be plotted now
# Add data for each agent
for i, agent_id in enumerate(unique_agents):
agent_data = df[df['agent_id'] == agent_id].copy()
agent_name = agent_data['agent_name'].iloc[0]
color = colors[i % len(colors)]
# Sort the data by timestamp
agent_data = agent_data.sort_values('timestamp')
# Log actual points being plotted for this agent
logger.info(f"Plotting agent: {agent_name} (ID: {agent_id}) with {len(agent_data)} points")
for idx, row in agent_data.iterrows():
logger.info(f" Point {idx}: timestamp={row['timestamp']}, apr={row['apr']}, type={row['metric_type']}")
# Get the APR data - this is what we'll plot
apr_data = agent_data[agent_data['metric_type'] == 'APR']
# SIMPLIFIED APPROACH: Use a single trace with lines+markers mode
# This is much more reliable across different platforms
if not apr_data.empty:
logger.info(f" Adding combined line+markers for {agent_name}")
# Explicitly convert to Python lists
x_values = apr_data['timestamp'].tolist()
y_values = apr_data['apr'].tolist()
# Log what we're about to plot
            for point_num, (x, y) in enumerate(zip(x_values, y_values)):
                logger.info(f"    Point {point_num + 1}: x={x}, y={y}")
# Use a single trace for both markers and lines
fig.add_trace(
go.Scatter(
x=x_values,
y=y_values,
mode='lines+markers', # Important: use both lines and markers
                    marker=dict(
                        color=color,  # per-agent palette color instead of a fixed blue
                        symbol='circle',
                        size=12,
                        line=dict(width=2, color='black')
                    ),
                    line=dict(color=color, width=2),
name=agent_name,
legendgroup=agent_name,
showlegend=True,
hovertemplate='Time: %{x}<br>APR: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>'
)
)
logger.info(f" Added combined line+markers trace for {agent_name}")
    # Update layout (keep settings plain for cross-platform reliability)
fig.update_layout(
title="APR Values for All Agents",
xaxis_title="Time",
yaxis_title="Value",
template="plotly_white",
height=600,
width=1000,
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
groupclick="toggleitem"
),
margin=dict(r=20, l=20, t=30, b=20),
hovermode="closest"
)
# FORCE FIXED Y-AXIS RANGE
fig.update_yaxes(
showgrid=True,
gridwidth=1,
gridcolor='rgba(0,0,0,0.1)',
range=[min_apr, max_apr], # Updated range including negative values
tickmode='linear',
tick0=0,
dtick=10
)
# Update x-axis
fig.update_xaxes(
showgrid=True,
gridwidth=1,
gridcolor='rgba(0,0,0,0.1)'
)
    # Try to finalize and save the full-featured figure; if anything fails,
    # fall back to the simpler plot built in the except branch below
try:
# Write detailed debug information before saving the figure
write_debug_info(df, fig)
# Save the figure (still useful for reference)
graph_file = "modius_apr_combined_graph.html"
fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False)
# Also save as image for compatibility
img_file = "modius_apr_combined_graph.png"
try:
fig.write_image(img_file)
logger.info(f"Combined graph saved to {graph_file} and {img_file}")
except Exception as e:
logger.error(f"Error saving image: {e}")
logger.info(f"Combined graph saved to {graph_file} only")
# Return the figure object for direct use in Gradio
return fig
except Exception as e:
# If the complex graph approach fails, create a simpler one
logger.error(f"Error creating advanced graph: {e}")
logger.info("Falling back to simpler graph")
# Create a simpler graph as fallback
simple_fig = go.Figure()
# Add zero line
simple_fig.add_shape(
type="line",
line=dict(dash="solid", width=1.5, color="black"),
y0=0, y1=0,
x0=min_time, x1=max_time
)
# Simply plot each agent's data as a line with markers
for i, agent_id in enumerate(unique_agents):
agent_data = df[df['agent_id'] == agent_id].copy()
agent_name = agent_data['agent_name'].iloc[0]
color = colors[i % len(colors)]
# Sort by timestamp
agent_data = agent_data.sort_values('timestamp')
# Add a single trace with markers+lines
simple_fig.add_trace(
go.Scatter(
x=agent_data['timestamp'],
y=agent_data['apr'],
mode='lines+markers',
name=agent_name,
marker=dict(size=10),
line=dict(width=2)
)
)
# Simplified layout
simple_fig.update_layout(
title="APR Values for All Agents",
xaxis_title="Time",
yaxis_title="Value",
yaxis=dict(range=[min_apr, max_apr]),
height=600,
width=1000
)
# Return the simple figure
return simple_fig
def save_to_csv(df):
"""Save the APR data DataFrame to a CSV file and return the file path"""
if df.empty:
logger.error("No APR data to save to CSV")
return None
# Define the CSV file path
csv_file = "modius_apr_values.csv"
# Save to CSV
df.to_csv(csv_file, index=False)
logger.info(f"APR data saved to {csv_file}")
# Also generate a statistics CSV file
stats_df = generate_statistics_from_data(df)
stats_csv = "modius_apr_statistics.csv"
stats_df.to_csv(stats_csv, index=False)
logger.info(f"Statistics saved to {stats_csv}")
return csv_file
def generate_statistics_from_data(df):
"""Generate statistics from the APR data"""
if df.empty:
return pd.DataFrame()
# Get unique agents
unique_agents = df['agent_id'].unique()
stats_list = []
# Generate per-agent statistics
for agent_id in unique_agents:
agent_data = df[df['agent_id'] == agent_id]
agent_name = agent_data['agent_name'].iloc[0]
# APR statistics
apr_data = agent_data[agent_data['metric_type'] == 'APR']
real_apr = apr_data[apr_data['is_dummy'] == False]
# Performance statistics
perf_data = agent_data[agent_data['metric_type'] == 'Performance']
real_perf = perf_data[perf_data['is_dummy'] == False]
stats = {
'agent_id': agent_id,
'agent_name': agent_name,
'total_points': len(agent_data),
'apr_points': len(apr_data),
'performance_points': len(perf_data),
'real_apr_points': len(real_apr),
'real_performance_points': len(real_perf),
'avg_apr': apr_data['apr'].mean() if not apr_data.empty else None,
'avg_performance': perf_data['apr'].mean() if not perf_data.empty else None,
'max_apr': apr_data['apr'].max() if not apr_data.empty else None,
'min_apr': apr_data['apr'].min() if not apr_data.empty else None,
'latest_timestamp': agent_data['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not agent_data.empty else None
}
stats_list.append(stats)
# Generate overall statistics
apr_only = df[df['metric_type'] == 'APR']
perf_only = df[df['metric_type'] == 'Performance']
overall_stats = {
'agent_id': 'ALL',
'agent_name': 'All Agents',
'total_points': len(df),
'apr_points': len(apr_only),
'performance_points': len(perf_only),
'real_apr_points': len(apr_only[apr_only['is_dummy'] == False]),
'real_performance_points': len(perf_only[perf_only['is_dummy'] == False]),
'avg_apr': apr_only['apr'].mean() if not apr_only.empty else None,
'avg_performance': perf_only['apr'].mean() if not perf_only.empty else None,
'max_apr': apr_only['apr'].max() if not apr_only.empty else None,
'min_apr': apr_only['apr'].min() if not apr_only.empty else None,
'latest_timestamp': df['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not df.empty else None
}
stats_list.append(overall_stats)
return pd.DataFrame(stats_list)
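# The resulting frame has one row per agent plus a final "ALL" summary row with
# the columns built above (point counts, avg/max/min APR, latest timestamp).
# Illustrative access pattern:
#
#   stats_df = generate_statistics_from_data(global_df)
#   overall = stats_df[stats_df["agent_id"] == "ALL"].iloc[0]
#   print(overall["avg_apr"], overall["latest_timestamp"])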
# Create dummy functions for the commented out imports
def create_transcation_visualizations():
"""Dummy implementation that returns a placeholder graph"""
fig = go.Figure()
fig.add_annotation(
text="Blockchain data loading disabled - placeholder visualization",
x=0.5, y=0.5, xref="paper", yref="paper",
showarrow=False, font=dict(size=20)
)
return fig
def create_active_agents_visualizations():
"""Dummy implementation that returns a placeholder graph"""
fig = go.Figure()
fig.add_annotation(
text="Blockchain data loading disabled - placeholder visualization",
x=0.5, y=0.5, xref="paper", yref="paper",
showarrow=False, font=dict(size=20)
)
return fig
# Comment out the blockchain connection code
"""
# Load environment variables from .env file
# RPC URLs
OPTIMISM_RPC_URL = os.getenv('OPTIMISM_RPC_URL')
MODE_RPC_URL = os.getenv('MODE_RPC_URL')
# Initialize Web3 instances
web3_instances = {
'optimism': Web3(Web3.HTTPProvider(OPTIMISM_RPC_URL)),
'mode': Web3(Web3.HTTPProvider(MODE_RPC_URL))
}
# Contract addresses for service registries
contract_addresses = {
'optimism': '0x3d77596beb0f130a4415df3D2D8232B3d3D31e44',
'mode': '0x3C1fF68f5aa342D296d4DEe4Bb1cACCA912D95fE'
}
# Load the ABI from the provided JSON file
with open('./contracts/service_registry_abi.json', 'r') as abi_file:
contract_abi = json.load(abi_file)
# Create the contract instances
service_registries = {
chain_name: web3.eth.contract(address=contract_addresses[chain_name], abi=contract_abi)
for chain_name, web3 in web3_instances.items()
}
# Check if connections are successful
for chain_name, web3_instance in web3_instances.items():
if not web3_instance.is_connected():
raise Exception(f"Failed to connect to the {chain_name.capitalize()} network.")
else:
print(f"Successfully connected to the {chain_name.capitalize()} network.")
"""
# Dummy blockchain functions to replace the commented ones
def get_transfers(integrator: str, wallet: str) -> Dict[str, Any]:
"""Dummy function that returns an empty result"""
return {"transfers": []}
def fetch_and_aggregate_transactions():
"""Dummy function that returns empty data"""
return [], {}
# Function to parse the transaction data and prepare it for visualization
def process_transactions_and_agents(data):
"""Dummy function that returns empty dataframes"""
df_transactions = pd.DataFrame()
df_agents = pd.DataFrame(columns=['date', 'agent_count'])
df_agents_weekly = pd.DataFrame()
return df_transactions, df_agents, df_agents_weekly
# Function to create visualizations based on the metrics
def create_visualizations():
"""
# Commenting out the original visualization code temporarily for debugging
transactions_data = fetch_and_aggregate_transactions()
df_transactions, df_agents, df_agents_weekly = process_transactions_and_agents(transactions_data)
# Fetch daily value locked data
df_tvl = pd.read_csv('daily_value_locked.csv')
# Calculate total value locked per chain per day
df_tvl["total_value_locked_usd"] = df_tvl["amount0_usd"] + df_tvl["amount1_usd"]
df_tvl_daily = df_tvl.groupby(["date", "chain_name"])["total_value_locked_usd"].sum().reset_index()
df_tvl_daily['date'] = pd.to_datetime(df_tvl_daily['date'])
# Filter out dates with zero total value locked
df_tvl_daily = df_tvl_daily[df_tvl_daily["total_value_locked_usd"] > 0]
chain_name_map = {
"mode": "Mode",
"base": "Base",
"ethereum": "Ethereum",
"optimism": "Optimism"
}
df_tvl_daily["chain_name"] = df_tvl_daily["chain_name"].map(chain_name_map)
# Plot total value locked
fig_tvl = px.bar(
df_tvl_daily,
x="date",
y="total_value_locked_usd",
color="chain_name",
opacity=0.7,
title="Total Volume Invested in Pools in Different Chains Daily",
labels={"date": "Date","chain_name": "Transaction Chain", "total_value_locked_usd": "Total Volume Invested (USD)"},
barmode='stack',
color_discrete_map={
"Mode": "orange",
"Base": "purple",
"Ethereum": "darkgreen",
"Optimism": "blue"
}
)
fig_tvl.update_layout(
xaxis_title="Date",
yaxis=dict(tickmode='linear', tick0=0, dtick=4),
xaxis=dict(
tickmode='array',
tickvals=df_tvl_daily['date'],
ticktext=df_tvl_daily['date'].dt.strftime('%b %d'),
tickangle=-45,
),
bargap=0.6, # Increase gap between bar groups (0-1)
bargroupgap=0.1, # Decrease gap between bars in a group (0-1)
height=600,
width=1200, # Specify width to prevent bars from being too wide
showlegend=True,
template='plotly_white'
)
fig_tvl.update_xaxes(tickformat="%b %d")
chain_name_map = {
10: "Optimism",
8453: "Base",
1: "Ethereum",
34443: "Mode"
}
df_transactions["sending_chain"] = df_transactions["sending_chain"].map(chain_name_map)
df_transactions["receiving_chain"] = df_transactions["receiving_chain"].map(chain_name_map)
df_transactions["sending_chain"] = df_transactions["sending_chain"].astype(str)
df_transactions["receiving_chain"] = df_transactions["receiving_chain"].astype(str)
df_transactions['date'] = pd.to_datetime(df_transactions['date'])
df_transactions["is_swap"] = df_transactions.apply(lambda x: x["sending_chain"] == x["receiving_chain"], axis=1)
swaps_per_chain = df_transactions[df_transactions["is_swap"]].groupby(["date", "sending_chain"]).size().reset_index(name="swap_count")
fig_swaps_chain = px.bar(
swaps_per_chain,
x="date",
y="swap_count",
color="sending_chain",
title="Chain Daily Activity: Swaps",
labels={"sending_chain": "Transaction Chain", "swap_count": "Daily Swap Nr"},
barmode="stack",
opacity=0.7,
color_discrete_map={
"Optimism": "blue",
"Ethereum": "darkgreen",
"Base": "purple",
"Mode": "orange"
}
)
fig_swaps_chain.update_layout(
xaxis_title="Date",
yaxis_title="Daily Swap Count",
yaxis=dict(tickmode='linear', tick0=0, dtick=1),
xaxis=dict(
tickmode='array',
tickvals=[d for d in swaps_per_chain['date']],
ticktext=[d.strftime('%m-%d') for d in swaps_per_chain['date']],
tickangle=-45,
),
bargap=0.6,
bargroupgap=0.1,
height=600,
width=1200,
margin=dict(l=50, r=50, t=50, b=50),
showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="right",
x=0.99
),
template='plotly_white'
)
fig_swaps_chain.update_xaxes(tickformat="%m-%d")
df_transactions["is_bridge"] = df_transactions.apply(lambda x: x["sending_chain"] != x["receiving_chain"], axis=1)
bridges_per_chain = df_transactions[df_transactions["is_bridge"]].groupby(["date", "sending_chain"]).size().reset_index(name="bridge_count")
fig_bridges_chain = px.bar(
bridges_per_chain,
x="date",
y="bridge_count",
color="sending_chain",
title="Chain Daily Activity: Bridges",
labels={"sending_chain": "Transaction Chain", "bridge_count": "Daily Bridge Nr"},
barmode="stack",
opacity=0.7,
color_discrete_map={
"Optimism": "blue",
"Ethereum": "darkgreen",
"Base": "purple",
"Mode": "orange"
}
)
fig_bridges_chain.update_layout(
xaxis_title="Date",
yaxis_title="Daily Bridge Count",
yaxis=dict(tickmode='linear', tick0=0, dtick=1),
xaxis=dict(
tickmode='array',
tickvals=[d for d in bridges_per_chain['date']],
ticktext=[d.strftime('%m-%d') for d in bridges_per_chain['date']],
tickangle=-45,
),
bargap=0.6,
bargroupgap=0.1,
height=600,
width=1200,
margin=dict(l=50, r=50, t=50, b=50),
showlegend=True,
legend=dict(
yanchor="top",
y=0.99,
xanchor="right",
x=0.99
),
template='plotly_white'
)
fig_bridges_chain.update_xaxes(tickformat="%m-%d")
df_agents['date'] = pd.to_datetime(df_agents['date'])
daily_agents_df = df_agents.groupby('date').agg({'agent_count': 'sum'}).reset_index()
daily_agents_df.rename(columns={'agent_count': 'daily_agent_count'}, inplace=True)
# Sort by date to ensure proper running total calculation
daily_agents_df = daily_agents_df.sort_values('date')
# Create week column
daily_agents_df['week'] = daily_agents_df['date'].dt.to_period('W').apply(lambda r: r.start_time)
# Calculate running total within each week
daily_agents_df['running_weekly_total'] = daily_agents_df.groupby('week')['daily_agent_count'].cumsum()
# Create final merged dataframe
weekly_merged_df = daily_agents_df.copy()
adjustment_date = pd.to_datetime('2024-11-15')
weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'daily_agent_count'] -= 1
weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'running_weekly_total'] -= 1
fig_agents_registered = go.Figure(data=[
go.Bar(
name='Daily nr of Registered Agents',
x=weekly_merged_df['date'].dt.strftime("%b %d"),
y=weekly_merged_df['daily_agent_count'],
opacity=0.7,
marker_color='blue'
),
go.Bar(
name='Weekly Nr of Registered Agents',
x=weekly_merged_df['date'].dt.strftime("%b %d"),
y=weekly_merged_df['running_weekly_total'],
opacity=0.7,
marker_color='purple'
)
])
fig_agents_registered.update_layout(
xaxis_title='Date',
yaxis_title='Number of Agents',
title="Nr of Agents Registered",
barmode='group',
yaxis=dict(tickmode='linear', tick0=0, dtick=1),
xaxis=dict(
categoryorder='array',
categoryarray=weekly_merged_df['date'].dt.strftime("%b %d"),
tickangle=-45
),
bargap=0.3,
height=600,
width=1200,
showlegend=True,
legend=dict(
yanchor="top",
xanchor="right",
),
template='plotly_white',
)
return fig_swaps_chain, fig_bridges_chain, fig_agents_registered,fig_tvl
"""
# Placeholder figures for testing
fig_swaps_chain = go.Figure()
fig_swaps_chain.add_annotation(
text="Blockchain data loading disabled - placeholder visualization",
x=0.5, y=0.5, xref="paper", yref="paper",
showarrow=False, font=dict(size=20)
)
fig_bridges_chain = go.Figure()
fig_bridges_chain.add_annotation(
text="Blockchain data loading disabled - placeholder visualization",
x=0.5, y=0.5, xref="paper", yref="paper",
showarrow=False, font=dict(size=20)
)
fig_agents_registered = go.Figure()
fig_agents_registered.add_annotation(
text="Blockchain data loading disabled - placeholder visualization",
x=0.5, y=0.5, xref="paper", yref="paper",
showarrow=False, font=dict(size=20)
)
fig_tvl = go.Figure()
fig_tvl.add_annotation(
text="Blockchain data loading disabled - placeholder visualization",
x=0.5, y=0.5, xref="paper", yref="paper",
showarrow=False, font=dict(size=20)
)
return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl
# Gradio dashboard (APR metrics only)
def dashboard():
with gr.Blocks() as demo:
gr.Markdown("# Valory APR Metrics")
# APR Metrics tab - the only tab
with gr.Tab("APR Metrics"):
with gr.Column():
refresh_btn = gr.Button("Refresh APR Data")
                # Show a placeholder until the user refreshes; passing it as the
                # initial value is more reliable than assigning .value afterwards
                placeholder_fig = go.Figure()
                placeholder_fig.add_annotation(
                    text="Click 'Refresh APR Data' to load APR graph",
                    x=0.5, y=0.5,
                    showarrow=False,
                    font=dict(size=15)
                )
                combined_graph = gr.Plot(label="APR for All Agents", value=placeholder_fig)
# Function to update the graph
def update_apr_graph():
# Generate visualization and get figure object directly
try:
combined_fig, _ = generate_apr_visualizations()
return combined_fig
except Exception as e:
logger.exception("Error generating APR visualization")
# Create error figure
error_fig = go.Figure()
error_fig.add_annotation(
text=f"Error: {str(e)}",
x=0.5, y=0.5,
showarrow=False,
font=dict(size=15, color="red")
)
return error_fig
# Set up the button click event with error handling
try:
# Use Gradio's button click properly
refresh_btn.click(fn=update_apr_graph, outputs=combined_graph)
except Exception as e:
logger.error(f"Error setting up button handler: {e}")
return demo
# Launch the dashboard
if __name__ == "__main__":
dashboard().launch()
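# For container or Hugging Face Space deployments, the launch call typically
# pins the host and port; a common variant (values are illustrative):
#
#   dashboard().launch(server_name="0.0.0.0", server_port=7860)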