Spaces: Running
gauravlochab committed · Commit 7d091ea · Parent(s): 044e2c9
feat: filter out non-positive APR values and update graph handling for positive values only
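In short, the commit drops the old APR/Performance split: non-positive values are no longer tagged as "Performance" but are skipped before the DataFrame is built, and the plotting code loses the below-zero region, the Performance markers, and the diagnostics panel that only existed for those values. A minimal sketch of the new filtering step, using made-up rows (in app.py the rows are built per agent inside fetch_apr_data_from_db, and agent_name / attr come from the database query):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Hypothetical per-agent rows; the real code assembles these from the DB query.
rows = [
    {"agent_name": "agent-a", "agent_id": "1", "apr": 12.5},
    {"agent_name": "agent-b", "agent_id": "2", "apr": -3.0},
    {"agent_name": "agent-c", "agent_id": "3", "apr": 0.0},
]

apr_data_list = []
for apr_data in rows:
    apr_data["is_dummy"] = False  # all real data, as in the commit
    # CHANGE: only keep strictly positive APR values
    if apr_data["apr"] > 0:
        apr_data["metric_type"] = "APR"
        apr_data_list.append(apr_data)
    else:
        logger.debug(
            f"Skipping non-positive value for agent {apr_data['agent_name']} "
            f"({apr_data['agent_id']}): {apr_data['apr']}"
        )

# Only the 12.5 row survives, so every remaining row has metric_type == "APR"
# and the plot's y-axis can start at 0.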
app.py
CHANGED
@@ -251,15 +251,15 @@ def fetch_apr_data_from_db():
         # Add is_dummy flag (all real data)
         apr_data["is_dummy"] = False

-        # …
-        if apr_data["apr"] …
-            apr_data["metric_type"] = "Performance"
-            logger.debug(f"Agent {agent_name} ({attr['agent_id']}): Performance value: {apr_data['apr']}")
-        else:
+        # CHANGE: Only include positive APR values (greater than 0)
+        if apr_data["apr"] > 0:
             apr_data["metric_type"] = "APR"
             logger.debug(f"Agent {agent_name} ({attr['agent_id']}): APR value: {apr_data['apr']}")
-        …
-        …
+            # Add to the data list only if value is positive
+            apr_data_list.append(apr_data)
+        else:
+            # Log that we're skipping non-positive values
+            logger.debug(f"Skipping non-positive value for agent {agent_name} ({attr['agent_id']}): {apr_data['apr']}")

     # Convert list of dictionaries to DataFrame
     if not apr_data_list:
@@ -273,7 +273,8 @@ def fetch_apr_data_from_db():
     logger.info(f"Created DataFrame with {len(global_df)} rows")
     logger.info(f"DataFrame columns: {global_df.columns.tolist()}")
     logger.info(f"APR statistics: min={global_df['apr'].min()}, max={global_df['apr'].max()}, mean={global_df['apr'].mean()}")
-    …
+    # After filtering, all values are APR type
+    logger.info("All values are APR type (positive values only)")
     logger.info(f"Agents count: {global_df['agent_name'].value_counts().to_dict()}")

     # Log the entire dataframe for debugging
@@ -488,7 +489,7 @@ def create_combined_time_series_graph(df):
     # CRITICAL: Log the exact dataframe we're using for plotting to help debug
     logger.info(f"Graph data - shape: {df.shape}, columns: {df.columns}")
     logger.info(f"Graph data - unique agents: {df['agent_name'].unique().tolist()}")
-    logger.info(…
+    logger.info("Graph data - all positive APR values only")
     logger.info(f"Graph data - min APR: {df['apr'].min()}, max APR: {df['apr'].max()}")

     # Export full dataframe to CSV for debugging
@@ -514,19 +515,13 @@ def create_combined_time_series_graph(df):
             f.write(f" Total data points: {len(agent_data)}\n")

             apr_data = agent_data[agent_data['metric_type'] == 'APR']
-            perf_data = agent_data[agent_data['metric_type'] == 'Performance']

             f.write(f" APR data points: {len(apr_data)}\n")
-            f.write(f" Performance data points: {len(perf_data)}\n")

             if not apr_data.empty:
                 f.write(f" APR values: {apr_data['apr'].tolist()}\n")
                 f.write(f" APR timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in apr_data['timestamp']]}\n")

-            if not perf_data.empty:
-                f.write(f" Performance values: {perf_data['apr'].tolist()}\n")
-                f.write(f" Performance timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in perf_data['timestamp']]}\n")
-
             f.write("\n")

         logger.info("Generated detailed graph data report")
@@ -540,8 +535,9 @@ def create_combined_time_series_graph(df):
     colors = px.colors.qualitative.Plotly[:len(unique_agents)]

     # IMPORTANT: Fixed y-axis range that always includes -100
-    …
-    …
+    # Since we're only showing positive values, adjust the range
+    min_apr = 0  # Start at 0
+    max_apr = max(df['apr'].max() * 1.1, 10)  # Add 10% padding, minimum of 10

     # Add background shapes for APR and Performance regions
     min_time = df['timestamp'].min()
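Note: worked through on assumed numbers, the new range rule behaves as follows: if the largest APR in the frame is 42.0, the axis becomes [0, 46.2] (42.0 * 1.1); if every APR is below roughly 9, the 10-unit floor wins and the axis stays at [0, 10], so a handful of small values still produces a readable plot.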
@@ -557,16 +553,6 @@ def create_combined_time_series_graph(df):
         layer="below"
     )

-    # Add shape for Performance region (below zero)
-    fig.add_shape(
-        type="rect",
-        fillcolor="rgba(255, 230, 230, 0.3)",
-        line=dict(width=0),
-        y0=min_apr, y1=0,
-        x0=min_time, x1=max_time,
-        layer="below"
-    )
-
     # Add zero line
     fig.add_shape(
         type="line",
@@ -575,7 +561,7 @@ def create_combined_time_series_graph(df):
         x0=min_time, x1=max_time
     )

-    # MODIFIED: Changed order of trace addition - …
+    # MODIFIED: Changed order of trace addition - only need APR values now
     # Add data for each agent
     for i, agent_id in enumerate(unique_agents):
         agent_data = df[df['agent_id'] == agent_id].copy()
@@ -590,36 +576,6 @@ def create_combined_time_series_graph(df):
         for idx, row in agent_data.iterrows():
             logger.info(f" Point {idx}: timestamp={row['timestamp']}, apr={row['apr']}, type={row['metric_type']}")

-        # First add scatter points for Performance values
-        perf_data = agent_data[agent_data['metric_type'] == 'Performance']
-        if not perf_data.empty:
-            logger.info(f" Adding {len(perf_data)} Performance markers for {agent_name}")
-            for idx, row in perf_data.iterrows():
-                logger.info(f" Performance marker: timestamp={row['timestamp']}, apr={row['apr']}")
-
-                # Use explicit Python boolean for showlegend
-                is_first_point = bool(idx == perf_data.index[0])
-                fig.add_trace(
-                    go.Scatter(
-                        x=[row['timestamp']],
-                        y=[row['apr']],
-                        mode='markers',
-                        marker=dict(
-                            color='red',  # Force consistent color
-                            symbol='square',
-                            size=16,  # Make markers larger
-                            line=dict(
-                                width=2,
-                                color='black'
-                            )
-                        ),
-                        name=f'{agent_name} Perf',
-                        legendgroup=agent_name,
-                        showlegend=is_first_point,  # Use native Python boolean
-                        hovertemplate='Time: %{x}<br>Performance: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>'
-                    )
-                )
-
         # Now add scatter points for APR values
         apr_data = agent_data[agent_data['metric_type'] == 'APR']
         if not apr_data.empty:
@@ -665,7 +621,7 @@ def create_combined_time_series_graph(df):

     # Update layout - use simple boolean values everywhere
     fig.update_layout(
-        title="APR …
+        title="APR Values for All Agents (Positive Values Only)",
         xaxis_title="Time",
         yaxis_title="Value",
         template="plotly_white",
@@ -690,8 +646,8 @@ def create_combined_time_series_graph(df):
         gridcolor='rgba(0,0,0,0.1)',
         range=[min_apr, max_apr],  # Fixed range
         tickmode='linear',
-        tick0=…
-        dtick=…
+        tick0=0,
+        dtick=10  # Adjusted for positive values
     )

     # Update x-axis
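Note: with tickmode='linear', tick0=0 and dtick=10, Plotly places y-axis gridlines at 0, 10, 20, … up to the padded maximum set above; the removed values (truncated in the rendered diff) presumably anchored ticks for the old range that included -100.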
@@ -758,10 +714,10 @@ def create_combined_time_series_graph(df):

     # Simplified layout
     simple_fig.update_layout(
-        title="APR …
+        title="APR Values (Simplified View - Positive Values Only)",
         xaxis_title="Time",
         yaxis_title="Value",
-        yaxis=dict(range=[…
+        yaxis=dict(range=[0, max_apr]),
         height=600,
         width=1000
     )
@@ -1179,78 +1135,7 @@ def create_visualizations():

     return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl

-# …
-def add_diagnostic_controls(demo):
-    """Add diagnostic UI controls to help debug the difference between local and production"""
-    with gr.Column():
-        gr.Markdown("## Diagnostics")
-
-        diagnostic_button = gr.Button("Run Data Diagnostics")
-        diagnostic_output = gr.Textbox(label="Diagnostic Results", lines=10)
-
-        def run_diagnostics():
-            """Function to diagnose data issues"""
-            global global_df
-
-            if global_df is None or global_df.empty:
-                return "No data available. Please click 'Refresh APR Data' first."
-
-            # Gather diagnostics
-            result = []
-            result.append(f"=== DIAGNOSTIC REPORT ===")
-            result.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
-            result.append(f"API Endpoint: {API_BASE_URL}")
-            result.append(f"Total data points: {len(global_df)}")
-
-            unique_agents = global_df['agent_id'].unique()
-            result.append(f"Number of unique agents: {len(unique_agents)}")
-
-            # Per-agent diagnostics
-            for agent_id in unique_agents:
-                agent_data = global_df[global_df['agent_id'] == agent_id]
-                agent_name = agent_data['agent_name'].iloc[0]
-
-                result.append(f"\nAgent: {agent_name} (ID: {agent_id})")
-                result.append(f" Data points: {len(agent_data)}")
-
-                # Check APR values
-                apr_data = agent_data[agent_data['metric_type'] == 'APR']
-                perf_data = agent_data[agent_data['metric_type'] == 'Performance']
-
-                result.append(f" APR points: {len(apr_data)}")
-                if not apr_data.empty:
-                    result.append(f" APR values: {apr_data['apr'].tolist()}")
-
-                result.append(f" Performance points: {len(perf_data)}")
-                if not perf_data.empty:
-                    result.append(f" Performance values: {perf_data['apr'].tolist()}")
-
-            # Write to file as well
-            with open("latest_diagnostics.txt", "w") as f:
-                f.write("\n".join(result))
-
-            return "\n".join(result)
-
-        # Fix for Gradio interface - use event listeners properly
-        try:
-            # Different Gradio versions have different APIs
-            # Try the newer approach first
-            diagnostic_button.click(
-                fn=run_diagnostics,
-                inputs=None,
-                outputs=diagnostic_output
-            )
-        except TypeError:
-            # Fall back to original approach
-            diagnostic_button.click(
-                fn=run_diagnostics,
-                inputs=[],
-                outputs=[diagnostic_output]
-            )
-
-    return demo
-
-# Modify dashboard function to include diagnostics
+# Modify dashboard function to remove the diagnostics section
 def dashboard():
     with gr.Blocks() as demo:
         gr.Markdown("# Valory APR Metrics")
@@ -1308,9 +1193,6 @@ def dashboard():
                 font=dict(size=15)
             )
             combined_graph.value = placeholder_fig
-
-            # Add diagnostics section
-            demo = add_diagnostic_controls(demo)

     return demo

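Taken together, the layout changes amount to the y-axis configuration sketched below. This is a standalone illustration with a made-up max_apr; in the commit the same settings are applied inside create_combined_time_series_graph (and, in simplified form, to simple_fig).

import plotly.graph_objects as go

max_apr = 46.2  # hypothetical padded maximum, e.g. max(42.0 * 1.1, 10)

fig = go.Figure()
fig.update_layout(
    title="APR Values for All Agents (Positive Values Only)",
    xaxis_title="Time",
    yaxis_title="Value",
    template="plotly_white",
    yaxis=dict(
        gridcolor="rgba(0,0,0,0.1)",
        range=[0, max_apr],  # positive-only range replaces the old range that included -100
        tickmode="linear",
        tick0=0,
        dtick=10,
    ),
)
fig.show()  # optional: renders an empty figure with the new axis setup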