Commit 3663584 · committed by gauravlochab
Parent(s): cba6d8a

fix: run diagnostics
app.py
CHANGED
@@ -19,84 +19,128 @@ from typing import List, Dict, Any
 # from app_trans_new import create_transcation_visualizations,create_active_agents_visualizations
 # APR visualization functions integrated directly
 
-
+# Set up more detailed logging
+logging.basicConfig(
+    level=logging.DEBUG,  # Change to DEBUG for more detailed logs
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    handlers=[
+        logging.FileHandler("app_debug.log"),  # Log to file for persistence
+        logging.StreamHandler()  # Also log to console
+    ]
+)
 logger = logging.getLogger(__name__)
 
+# Log the startup information
+logger.info("============= APPLICATION STARTING =============")
+logger.info(f"Running from directory: {os.getcwd()}")
+
 # Global variable to store the data for reuse
 global_df = None
 
 # Configuration
 API_BASE_URL = "https://afmdb.autonolas.tech"
-
-# Add a timezone adjustment function at the top of the file after imports
-def adjust_timestamp(timestamp_dt, hours_offset=0):
-    """
-    Adjust a timestamp by the specified number of hours.
-    Used to correct for timezone differences between environments.
-
-    Args:
-        timestamp_dt: datetime object to adjust
-        hours_offset: number of hours to add (can be negative)
-
-    Returns:
-        Adjusted datetime object
-    """
-    if timestamp_dt is None:
-        return None
-
-    return timestamp_dt + timedelta(hours=hours_offset)
+logger.info(f"Using API endpoint: {API_BASE_URL}")
 
 def get_agent_type_by_name(type_name: str) -> Dict[str, Any]:
     """Get agent type by name"""
-
-
-
+    url = f"{API_BASE_URL}/api/agent-types/name/{type_name}"
+    logger.debug(f"Calling API: {url}")
+
+    try:
+        response = requests.get(url)
+        logger.debug(f"Response status: {response.status_code}")
+
+        if response.status_code == 404:
+            logger.error(f"Agent type '{type_name}' not found")
+            return None
+
+        response.raise_for_status()
+        result = response.json()
+        logger.debug(f"Agent type response: {result}")
+        return result
+    except Exception as e:
+        logger.error(f"Error in get_agent_type_by_name: {e}")
         return None
-        response.raise_for_status()
-        return response.json()
 
 def get_attribute_definition_by_name(attr_name: str) -> Dict[str, Any]:
     """Get attribute definition by name"""
-
-
-
+    url = f"{API_BASE_URL}/api/attributes/name/{attr_name}"
+    logger.debug(f"Calling API: {url}")
+
+    try:
+        response = requests.get(url)
+        logger.debug(f"Response status: {response.status_code}")
+
+        if response.status_code == 404:
+            logger.error(f"Attribute definition '{attr_name}' not found")
+            return None
+
+        response.raise_for_status()
+        result = response.json()
+        logger.debug(f"Attribute definition response: {result}")
+        return result
+    except Exception as e:
+        logger.error(f"Error in get_attribute_definition_by_name: {e}")
         return None
-        response.raise_for_status()
-        return response.json()
 
 def get_agents_by_type(type_id: int) -> List[Dict[str, Any]]:
     """Get all agents of a specific type"""
-
-
-
+    url = f"{API_BASE_URL}/api/agent-types/{type_id}/agents/"
+    logger.debug(f"Calling API: {url}")
+
+    try:
+        response = requests.get(url)
+        logger.debug(f"Response status: {response.status_code}")
+
+        if response.status_code == 404:
+            logger.error(f"No agents found for type ID {type_id}")
+            return []
+
+        response.raise_for_status()
+        result = response.json()
+        logger.debug(f"Agents count: {len(result)}")
+        logger.debug(f"First few agents: {result[:2] if result else []}")
+        return result
+    except Exception as e:
+        logger.error(f"Error in get_agents_by_type: {e}")
         return []
-        response.raise_for_status()
-        return response.json()
 
 def get_attribute_values_by_type_and_attr(agents: List[Dict[str, Any]], attr_def_id: int) -> List[Dict[str, Any]]:
     """Get all attribute values for a specific attribute definition across all agents of a given list"""
     all_attributes = []
+    logger.debug(f"Getting attributes for {len(agents)} agents with attr_def_id: {attr_def_id}")
 
     # For each agent, get their attributes and filter for the one we want
     for agent in agents:
         agent_id = agent["agent_id"]
 
         # Call the /api/agents/{agent_id}/attributes/ endpoint
-
-
-            logger.error(f"No attributes found for agent ID {agent_id}")
-            continue
+        url = f"{API_BASE_URL}/api/agents/{agent_id}/attributes/"
+        logger.debug(f"Calling API for agent {agent_id}: {url}")
 
         try:
+            response = requests.get(url, params={"limit": 1000})
+
+            if response.status_code == 404:
+                logger.error(f"No attributes found for agent ID {agent_id}")
+                continue
+
             response.raise_for_status()
             agent_attrs = response.json()
+            logger.debug(f"Agent {agent_id} has {len(agent_attrs)} attributes")
 
             # Filter for the specific attribute definition ID
             filtered_attrs = [attr for attr in agent_attrs if attr.get("attr_def_id") == attr_def_id]
+            logger.debug(f"Agent {agent_id} has {len(filtered_attrs)} APR attributes")
+
+            if filtered_attrs:
+                logger.debug(f"Sample attribute for agent {agent_id}: {filtered_attrs[0]}")
+
             all_attributes.extend(filtered_attrs)
         except requests.exceptions.RequestException as e:
             logger.error(f"Error fetching attributes for agent ID {agent_id}: {e}")
 
+    logger.info(f"Total APR attributes found across all agents: {len(all_attributes)}")
     return all_attributes
 
 def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str:
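Review note: get_agent_type_by_name, get_attribute_definition_by_name, and get_agents_by_type now repeat the same fetch/log/404/raise pattern. A minimal sketch of how that could be shared, assuming only the requests library; get_json (and its timeout) is a hypothetical helper, not part of this commit:

import logging
from typing import Any, Optional

import requests

logger = logging.getLogger(__name__)

def get_json(url: str, not_found_msg: str) -> Optional[Any]:
    """GET a URL, log request and status, and return parsed JSON (None on 404 or error)."""
    logger.debug(f"Calling API: {url}")
    try:
        response = requests.get(url, timeout=30)  # timeout is an assumption, not in the commit
        logger.debug(f"Response status: {response.status_code}")
        if response.status_code == 404:
            logger.error(not_found_msg)
            return None
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        logger.error(f"Error calling {url}: {e}")
        return None

# e.g. get_json(f"{API_BASE_URL}/api/agent-types/name/Modius", "Agent type 'Modius' not found")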
@@ -109,14 +153,17 @@ def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str:
 def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]:
     """Extract APR value and timestamp from JSON value"""
     try:
+        agent_id = attr.get("agent_id", "unknown")
+        logger.debug(f"Extracting APR value for agent {agent_id}")
+
         # The APR value is stored in the json_value field
         if attr["json_value"] is None:
-            logger.
-            return {"apr": None, "timestamp": None, "agent_id":
+            logger.debug(f"Agent {agent_id}: json_value is None")
+            return {"apr": None, "timestamp": None, "agent_id": agent_id, "is_dummy": False}
 
         # If json_value is a string, parse it
         if isinstance(attr["json_value"], str):
-            logger.
+            logger.debug(f"Agent {agent_id}: json_value is string, parsing")
             json_data = json.loads(attr["json_value"])
         else:
             json_data = attr["json_value"]
@@ -124,29 +171,20 @@ def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]:
         apr = json_data.get("apr")
         timestamp = json_data.get("timestamp")
 
-        logger.
+        logger.debug(f"Agent {agent_id}: Raw APR value: {apr}, timestamp: {timestamp}")
 
         # Convert timestamp to datetime if it exists
         timestamp_dt = None
         if timestamp:
-            # Just use the standard conversion without timezone specification
             timestamp_dt = datetime.fromtimestamp(timestamp)
-            logger.info(f"Converted timestamp: {timestamp_dt}")
-
-
-
-                logger.info(f"Current local time: {local_now}")
-                logger.info(f"Difference between API time and local time (hours): {(timestamp_dt - local_now).total_seconds() / 3600:.2f}")
-            except Exception as e:
-                logger.error(f"Error calculating time difference: {e}")
-        else:
-            logger.warning(f"No timestamp in data for agent_id: {attr.get('agent_id')}")
-
-        return {"apr": apr, "timestamp": timestamp_dt, "agent_id": attr["agent_id"], "is_dummy": False}
 
+        result = {"apr": apr, "timestamp": timestamp_dt, "agent_id": agent_id, "is_dummy": False}
+        logger.debug(f"Agent {agent_id}: Extracted result: {result}")
+        return result
     except (json.JSONDecodeError, KeyError, TypeError) as e:
         logger.error(f"Error parsing JSON value: {e} for agent_id: {attr.get('agent_id')}")
-
+        logger.error(f"Problematic json_value: {attr.get('json_value')}")
+        return {"apr": None, "timestamp": None, "agent_id": attr.get('agent_id'), "is_dummy": False}
 
 def fetch_apr_data_from_db():
     """
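Review note: with the adjust_timestamp offset hack removed, extract_apr_value still calls datetime.fromtimestamp(timestamp) with no tz argument, which interprets the epoch value in the host's local timezone; that is exactly the local-vs-production drift the deleted hack was compensating for. A sketch of a timezone-aware conversion, assuming the stored timestamp is Unix epoch seconds:

from datetime import datetime, timezone

def epoch_to_utc(epoch_seconds: float) -> datetime:
    # Pin the interpretation to UTC so the result is identical on any host.
    return datetime.fromtimestamp(epoch_seconds, tz=timezone.utc)

# epoch_to_utc(1700000000) -> datetime(2023, 11, 14, 22, 13, 20, tzinfo=timezone.utc)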
@@ -154,13 +192,11 @@ def fetch_apr_data_from_db():
     """
     global global_df
 
-
-    # Based on the logs, we're seeing ~6 hour difference
-    # If HF is showing earlier times than local, use a negative value
-    TIMEZONE_OFFSET_HOURS = -3  # Adjust based on observed differences
+    logger.info("==== Starting APR data fetch ====")
 
     try:
         # Step 1: Find the Modius agent type
+        logger.info("Finding Modius agent type")
         modius_type = get_agent_type_by_name("Modius")
         if not modius_type:
             logger.error("Modius agent type not found, using placeholder data")
@@ -168,8 +204,10 @@ def fetch_apr_data_from_db():
             return global_df
 
         type_id = modius_type["type_id"]
+        logger.info(f"Found Modius agent type with ID: {type_id}")
 
         # Step 2: Find the APR attribute definition
+        logger.info("Finding APR attribute definition")
         apr_attr_def = get_attribute_definition_by_name("APR")
         if not apr_attr_def:
             logger.error("APR attribute definition not found, using placeholder data")
@@ -177,30 +215,35 @@ def fetch_apr_data_from_db():
             return global_df
 
         attr_def_id = apr_attr_def["attr_def_id"]
+        logger.info(f"Found APR attribute definition with ID: {attr_def_id}")
 
         # Step 3: Get all agents of type Modius
+        logger.info(f"Getting all agents of type Modius (type_id: {type_id})")
         modius_agents = get_agents_by_type(type_id)
         if not modius_agents:
             logger.error("No agents of type 'Modius' found")
             global_df = pd.DataFrame([])
             return global_df
 
+        logger.info(f"Found {len(modius_agents)} Modius agents")
+        logger.debug(f"Modius agents: {[{'agent_id': a['agent_id'], 'agent_name': a['agent_name']} for a in modius_agents]}")
+
         # Step 4: Fetch all APR values for Modius agents
+        logger.info(f"Fetching APR values for all Modius agents (attr_def_id: {attr_def_id})")
         apr_attributes = get_attribute_values_by_type_and_attr(modius_agents, attr_def_id)
         if not apr_attributes:
             logger.error("No APR values found for 'Modius' agents")
             global_df = pd.DataFrame([])
             return global_df
 
+        logger.info(f"Found {len(apr_attributes)} APR attributes total")
+
         # Step 5: Extract APR data
+        logger.info("Extracting APR data from attributes")
         apr_data_list = []
         for attr in apr_attributes:
             apr_data = extract_apr_value(attr)
             if apr_data["apr"] is not None and apr_data["timestamp"] is not None:
-                # Apply timezone adjustment
-                apr_data["timestamp"] = adjust_timestamp(apr_data["timestamp"], TIMEZONE_OFFSET_HOURS)
-                logger.info(f"Adjusted timestamp: {apr_data['timestamp']}")
-
                 # Get agent name
                 agent_name = get_agent_name(attr["agent_id"], modius_agents)
                 # Add agent name to the data
@@ -211,8 +254,10 @@ def fetch_apr_data_from_db():
                 # Mark negative values as "Performance" metrics
                 if apr_data["apr"] < 0:
                     apr_data["metric_type"] = "Performance"
+                    logger.debug(f"Agent {agent_name} ({attr['agent_id']}): Performance value: {apr_data['apr']}")
                 else:
                     apr_data["metric_type"] = "APR"
+                    logger.debug(f"Agent {agent_name} ({attr['agent_id']}): APR value: {apr_data['apr']}")
 
                 apr_data_list.append(apr_data)
 
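Review note: the if/else above tags each row by the sign of its APR value. The same rule as a single vectorized expression over the finished DataFrame, equivalent in effect (the numpy import is an assumption; this is not part of the commit):

import numpy as np

# "Performance" for negative values, "APR" otherwise, computed column-wise.
global_df["metric_type"] = np.where(global_df["apr"] < 0, "Performance", "APR")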
@@ -224,10 +269,17 @@ def fetch_apr_data_from_db():
 
         global_df = pd.DataFrame(apr_data_list)
 
-        # Log
-
-
-
+        # Log the resulting dataframe
+        logger.info(f"Created DataFrame with {len(global_df)} rows")
+        logger.info(f"DataFrame columns: {global_df.columns.tolist()}")
+        logger.info(f"APR statistics: min={global_df['apr'].min()}, max={global_df['apr'].max()}, mean={global_df['apr'].mean()}")
+        logger.info(f"Metric types count: {global_df['metric_type'].value_counts().to_dict()}")
+        logger.info(f"Agents count: {global_df['agent_name'].value_counts().to_dict()}")
+
+        # Log the entire dataframe for debugging
+        logger.debug("Final DataFrame contents:")
+        for idx, row in global_df.iterrows():
+            logger.debug(f"Row {idx}: {row.to_dict()}")
 
         return global_df
 
@@ -237,6 +289,7 @@ def fetch_apr_data_from_db():
         return global_df
     except Exception as e:
         logger.error(f"Error fetching APR data: {e}")
+        logger.exception("Exception details:")
        global_df = pd.DataFrame([])
         return global_df
 
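Review note: the row-by-row DEBUG loop added at the end of fetch_apr_data_from_db formats every row on every fetch. A single guarded dump does one formatting pass and skips the work entirely when DEBUG is off; a sketch using only stdlib logging and pandas:

import logging

if logger.isEnabledFor(logging.DEBUG):
    # One to_string() call instead of len(global_df) f-string evaluations.
    logger.debug("Final DataFrame contents:\n%s", global_df.to_string())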
@@ -428,34 +481,50 @@ def create_combined_time_series_graph(df):
     )
     return fig
 
-    #
-
-
-    logger.info(f"
-    logger.info("Platform/Environment info:")
-    logger.info(f"Host: {os.uname().nodename if hasattr(os, 'uname') else 'Unknown'}")
-    logger.info(f"System: {os.name}")
-
-    # Create a timestamp reference to identify the environment
-    current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-    logger.info(f"Environment check - current time: {current_time}")
-
-    #
-
-
-    for idx, row in df.iterrows():
-        logger.info(f"Data point {idx}: agent={row['agent_name']}, time={row['timestamp']}, apr={row['apr']}, type={row['metric_type']}")
+    # ADDED: Export full dataframe to CSV for debugging
+    debug_csv = "debug_graph_data.csv"
+    df.to_csv(debug_csv)
+    logger.info(f"Exported graph data to {debug_csv} for debugging")
 
+    # ADDED: Write detailed data report
+    with open("debug_graph_data_report.txt", "w") as f:
+        f.write("==== GRAPH DATA REPORT ====\n\n")
+        f.write(f"Total data points: {len(df)}\n")
+        f.write(f"Timestamp range: {df['timestamp'].min()} to {df['timestamp'].max()}\n\n")
+
+        # Output per-agent details
+        unique_agents = df['agent_id'].unique()
+        f.write(f"Number of agents: {len(unique_agents)}\n\n")
+
+        for agent_id in unique_agents:
+            agent_data = df[df['agent_id'] == agent_id]
+            agent_name = agent_data['agent_name'].iloc[0]
+
+            f.write(f"== Agent: {agent_name} (ID: {agent_id}) ==\n")
+            f.write(f"  Total data points: {len(agent_data)}\n")
+
+            apr_data = agent_data[agent_data['metric_type'] == 'APR']
+            perf_data = agent_data[agent_data['metric_type'] == 'Performance']
+
+            f.write(f"  APR data points: {len(apr_data)}\n")
+            f.write(f"  Performance data points: {len(perf_data)}\n")
+
+            if not apr_data.empty:
+                f.write(f"  APR values: {apr_data['apr'].tolist()}\n")
+                f.write(f"  APR timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in apr_data['timestamp']]}\n")
+
+            if not perf_data.empty:
+                f.write(f"  Performance values: {perf_data['apr'].tolist()}\n")
+                f.write(f"  Performance timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in perf_data['timestamp']]}\n")
+
+            f.write("\n")
 
+    logger.info("Generated detailed graph data report")
 
     # Create Plotly figure
     fig = go.Figure()
 
     # Get unique agents
-    unique_agents = df['agent_id'].unique()
-    logger.info(f"Unique agents: {[df[df['agent_id'] == agent_id]['agent_name'].iloc[0] for agent_id in unique_agents]}")
-
-    # Define a color scale for different agents
     colors = px.colors.qualitative.Plotly[:len(unique_agents)]
 
     # Add background shapes for APR and Performance regions
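Review note: debug_graph_data.csv and debug_graph_data_report.txt land in the Space's working directory, which does not survive restarts on Hugging Face Spaces unless persistent storage is enabled. For offline inspection the CSV round-trips with pandas; a sketch (parse_dates matching the exported timestamp column is an assumption):

import pandas as pd

df_check = pd.read_csv("debug_graph_data.csv", index_col=0, parse_dates=["timestamp"])
# Summarize values per agent and metric type, mirroring the text report.
print(df_check.groupby(["agent_name", "metric_type"])["apr"].describe())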
@@ -498,9 +567,7 @@ def create_combined_time_series_graph(df):
 
         # Sort the data by timestamp
         agent_data = agent_data.sort_values('timestamp')
-
-        logger.info(f"Agent {agent_name} timestamps: {agent_data['timestamp'].tolist()}")
-
+        print("agent_data_combined",agent_data)
         # Add the combined line for both APR and Performance
         fig.add_trace(
             go.Scatter(
@@ -516,7 +583,7 @@ def create_combined_time_series_graph(df):
 
         # Add scatter points for APR values
         apr_data = agent_data[agent_data['metric_type'] == 'APR']
-
+        print("apr_data_combined",apr_data)
         if not apr_data.empty:
             fig.add_trace(
                 go.Scatter(
@@ -533,7 +600,7 @@ def create_combined_time_series_graph(df):
 
         # Add scatter points for Performance values
         perf_data = agent_data[agent_data['metric_type'] == 'Performance']
-
+        print("perf_data_combined",perf_data)
         if not perf_data.empty:
             fig.add_trace(
                 go.Scatter(
@@ -550,7 +617,7 @@ def create_combined_time_series_graph(df):
 
     # Update layout
     fig.update_layout(
-        title=
+        title="APR and Performance Values for All Agents",
         xaxis_title="Time",
         yaxis_title="Value",
         template="plotly_white",
@@ -564,17 +631,8 @@ def create_combined_time_series_graph(df):
             x=1,
             groupclick="toggleitem"
         ),
-        margin=dict(r=20, l=20, t=
-        hovermode="closest"
-        annotations=[
-            dict(
-                text=f"Environment: {environment_tag} | Server Time: {current_time}",
-                xref="paper", yref="paper",
-                x=0.5, y=1.05,  # Positioned above the main title
-                showarrow=False,
-                font=dict(size=10, color="gray")
-            )
-        ]
+        margin=dict(r=20, l=20, t=30, b=20),
+        hovermode="closest"
     )
 
     # Update axes
@@ -1004,7 +1062,78 @@ def create_visualizations():
 
     return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl
 
-#
+# Add new function to the bottom of the file, before the dashboard() function
+def add_diagnostic_controls(demo):
+    """Add diagnostic UI controls to help debug the difference between local and production"""
+    with gr.Column():
+        gr.Markdown("## Diagnostics")
+
+        diagnostic_button = gr.Button("Run Data Diagnostics")
+        diagnostic_output = gr.Textbox(label="Diagnostic Results", lines=10)
+
+        def run_diagnostics():
+            """Function to diagnose data issues"""
+            global global_df
+
+            if global_df is None or global_df.empty:
+                return "No data available. Please click 'Refresh APR Data' first."
+
+            # Gather diagnostics
+            result = []
+            result.append(f"=== DIAGNOSTIC REPORT ===")
+            result.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+            result.append(f"API Endpoint: {API_BASE_URL}")
+            result.append(f"Total data points: {len(global_df)}")
+
+            unique_agents = global_df['agent_id'].unique()
+            result.append(f"Number of unique agents: {len(unique_agents)}")
+
+            # Per-agent diagnostics
+            for agent_id in unique_agents:
+                agent_data = global_df[global_df['agent_id'] == agent_id]
+                agent_name = agent_data['agent_name'].iloc[0]
+
+                result.append(f"\nAgent: {agent_name} (ID: {agent_id})")
+                result.append(f"  Data points: {len(agent_data)}")
+
+                # Check APR values
+                apr_data = agent_data[agent_data['metric_type'] == 'APR']
+                perf_data = agent_data[agent_data['metric_type'] == 'Performance']
+
+                result.append(f"  APR points: {len(apr_data)}")
+                if not apr_data.empty:
+                    result.append(f"  APR values: {apr_data['apr'].tolist()}")
+
+                result.append(f"  Performance points: {len(perf_data)}")
+                if not perf_data.empty:
+                    result.append(f"  Performance values: {perf_data['apr'].tolist()}")
+
+            # Write to file as well
+            with open("latest_diagnostics.txt", "w") as f:
+                f.write("\n".join(result))
+
+            return "\n".join(result)
+
+        # Fix for Gradio interface - use event listeners properly
+        try:
+            # Different Gradio versions have different APIs
+            # Try the newer approach first
+            diagnostic_button.click(
+                fn=run_diagnostics,
+                inputs=None,
+                outputs=diagnostic_output
+            )
+        except TypeError:
+            # Fall back to original approach
+            diagnostic_button.click(
+                fn=run_diagnostics,
+                inputs=[],
+                outputs=[diagnostic_output]
+            )
+
+    return demo
+
+# Modify dashboard function to include diagnostics
 def dashboard():
     with gr.Blocks() as demo:
         gr.Markdown("# Valory APR Metrics")
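Review note: the try/except TypeError above guards the .click() registration call itself (older Gradio signatures raise at registration time), not exceptions raised inside run_diagnostics. Since the same fallback is repeated for refresh_btn in the next hunk, it could be factored out; a sketch with a hypothetical helper name:

def click_compat(button, fn, output):
    """Register a click handler across Gradio API variants."""
    try:
        button.click(fn=fn, inputs=None, outputs=output)   # newer-style call
    except TypeError:
        button.click(fn=fn, inputs=[], outputs=[output])   # older-style call

# e.g. click_compat(diagnostic_button, run_diagnostics, diagnostic_output)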
@@ -1020,15 +1149,36 @@ def dashboard():
         # Function to update the graph
         def update_apr_graph():
             # Generate visualization and get figure object directly
-
-
+            try:
+                combined_fig, _ = generate_apr_visualizations()
+                return combined_fig
+            except Exception as e:
+                logger.exception("Error generating APR visualization")
+                # Create error figure
+                error_fig = go.Figure()
+                error_fig.add_annotation(
+                    text=f"Error: {str(e)}",
+                    x=0.5, y=0.5,
+                    showarrow=False,
+                    font=dict(size=15, color="red")
+                )
+                return error_fig
 
-        # Set up the button click event
-
-
-
-
-
+        # Set up the button click event with error handling
+        try:
+            # Try newer Gradio API first
+            refresh_btn.click(
+                fn=update_apr_graph,
+                inputs=None,
+                outputs=combined_graph
+            )
+        except TypeError:
+            # Fall back to original method
+            refresh_btn.click(
+                fn=update_apr_graph,
+                inputs=[],
+                outputs=[combined_graph]
+            )
 
         # Initialize the graph on load
         # We'll use placeholder figure initially
@@ -1041,6 +1191,9 @@ def dashboard():
             font=dict(size=15)
         )
         combined_graph.value = placeholder_fig
+
+        # Add diagnostics section
+        demo = add_diagnostic_controls(demo)
 
     return demo
 
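Review note: with add_diagnostic_controls wired into dashboard(), launching is unchanged; a minimal usage sketch, assuming the module is run directly as app.py on the Space:

if __name__ == "__main__":
    demo = dashboard()
    demo.launch()  # Gradio serves the dashboard, diagnostics panel included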