""" | |
Leaderboard table components for the leaderboard application. | |
""" | |
import streamlit as st | |
from src.data.processors import get_model_type_style, get_rank_style | |
def render_leaderboard_table(display_df, metric_columns): | |
""" | |
Render the custom HTML leaderboard table | |
Args: | |
display_df (pandas.DataFrame): The DataFrame with the display data | |
metric_columns (list): List of metric column names | |
""" | |
from src.components.header import render_section_header | |
# Display model ranking header without the box | |
render_section_header("Model Rankings") | |
# Start building the HTML table structure | |
html_table = """ | |
<div class="fixed-table-container"> | |
<div class="scroll-container"> | |
<table class="fixed-table"> | |
<thead> | |
<tr class="header-row"> | |
<th class="fixed-column first-fixed-column" rowspan="2">Rank</th> | |
<th class="fixed-column second-fixed-column" rowspan="2">Model + Scaffolding</th> | |
<th class="model-type-cell" rowspan="2">Model Type</th> | |
""" | |
# Add the metric header | |
html_table += f'<th colspan="{len(metric_columns)}" class="metric-header">Margin To Human</th>' | |
# Continue the table structure | |
html_table += """ | |
</tr> | |
<tr class="sub-header"> | |
""" | |
# Add individual column headers for metrics | |
for col in metric_columns: | |
column_class = "overall-cell" if col == "Metric Average" else "metric-cell" | |
html_table += f'<th class="{column_class}">{col}</th>' | |
# Close the header and start the body | |
html_table += """ | |
</tr> | |
</thead> | |
<tbody> | |
""" | |
    # Add the data rows
    for i, (_, row) in enumerate(display_df.iterrows()):
        # Alternate row background colors so cells stay consistent across columns
        row_bg = "#0a0a0a" if i % 2 == 0 else "#111111"

        # Start the row
        html_table += '<tr class="table-row">'

        # Rank cell with medal styling; apply the row background to the fixed column
        rank_style = f"background-color: {row_bg};"
        rank_styles = get_rank_style(row["Rank"])
        for style_key, style_value in rank_styles.items():
            rank_style += f"{style_key}: {style_value};"
        html_table += f'<td class="fixed-column first-fixed-column" style="{rank_style}">{row["Rank"]}</td>'

        # Model name fixed column with consistent background
        html_table += (
            f'<td class="fixed-column second-fixed-column" title="{row["Model Name"]}" '
            f'style="background-color: {row_bg}; font-weight: 500; overflow: hidden; '
            f'text-overflow: ellipsis; white-space: nowrap; text-align: center;">'
            f'{row["Model Name"]}</td>'
        )

        # Model type cell
        model_type = row["Model Type"]
        type_style = f"background-color: {row_bg};"
        model_type_styles = get_model_type_style(model_type)
        for style_key, style_value in model_type_styles.items():
            if style_value:
                type_style += f"{style_key}: {style_value};"
        html_table += f'<td class="table-cell model-type-cell" style="{type_style}">{model_type}</td>'

        # Add metric values with minimal styling
        for col in metric_columns:
            cell_class = "table-cell overall-cell" if col == "Metric Average" else "table-cell metric-cell"
            value_text = row[col]

            # Color the cell based on positive/negative numeric values;
            # non-numeric values keep the default styling.
            try:
                value = float(str(row[col]).replace(",", ""))
                if value > 0:
                    cell_class += " positive-value"
                elif value < 0:
                    cell_class += " negative-value"
            except (ValueError, TypeError):
                pass

            html_table += f'<td class="{cell_class}" style="background-color: {row_bg};">{value_text}</td>'

        html_table += "</tr>"
    # Close the table
    html_table += """
                </tbody>
            </table>
        </div>
    </div>
    """

    # Add the metric definition below the table
    metric_definition = """
    <div class="metric-definition">
        <h4>Margin to Human</h4>
        <p>This metric measures what percentage of the gap between the baseline and top human performance an agent closes on challenging Machine Learning Research Competition problems. For example, if the baseline is 100, top human performance is 200, and the agent scores 110, the agent has closed 10% of that gap. Higher percentages indicate models that come closer to top human-level research capability.</p>
    </div>
    """

    # Display the custom HTML table and metric definition
    st.markdown(html_table + metric_definition, unsafe_allow_html=True)
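

# The helper below is a hypothetical sketch, not part of the original module: it
# illustrates the "Margin to Human" computation described in the metric definition
# above (the share of the baseline-to-top-human gap closed by an agent). The name,
# signature, and percentage scale are assumptions made for illustration only.
def _margin_to_human_pct(agent_score, baseline_score, top_human_score):
    """Return the percentage of the baseline-to-top-human gap closed by the agent.

    Using the example from the definition: baseline 100, top human 200, agent 110
    gives (110 - 100) / (200 - 100) * 100 = 10.0, i.e. 10% of the gap closed.
    """
    gap = top_human_score - baseline_score
    if gap == 0:
        return 0.0  # avoid division by zero when baseline equals top human performance
    return (agent_score - baseline_score) / gap * 100.0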


def render_empty_state():
    """
    Render an empty state when no data is available.
    """
    st.markdown("""
    <div class="warning-box">
        <strong>No data to display.</strong> Please select at least one task and one model type to view the data.
    </div>
    """, unsafe_allow_html=True)