File size: 2,490 Bytes
37d5f61
 
 
 
 
 
 
 
 
7113dc0
37d5f61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import streamlit as st
import asyncio
from src.core.styles import CUSTOM_CSS
from src.components.header import render_header
from src.components.filters import render_table_filters, render_plot_filters
from src.components.visualizations import (
    render_performance_plots,
    render_leaderboard_table,
)
from src.services.firebase import fetch_leaderboard_data

# Configure the page. Per Streamlit's API, set_page_config must be the first
# Streamlit command executed; wide layout suits the data-heavy leaderboard table.
st.set_page_config(
    page_title="AI-Phone Leaderboard",
    page_icon="src/static/images/favicon.png",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Apply custom CSS app-wide. unsafe_allow_html is required so the raw
# <style> markup in CUSTOM_CSS is rendered rather than escaped as text.
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)

async def main():
    """Render the leaderboard page: header, filterable table, and comparison plots.

    Fetches the full benchmark DataFrame once, then derives two views from it:
    a table filtered by the user's table-filter selections, and a plot slice
    restricted to one (model, benchmark) pair. Returns early with an info
    message when no data is available.
    """
    # Render header
    render_header()

    # Fetch initial data (single fetch; all views below are derived from it)
    full_df = await fetch_leaderboard_data()
    if full_df.empty:
        st.info("No benchmark data available yet!")
        return

    # Unique values drive the filter widgets' option lists
    models = sorted(full_df["Model"].unique())
    benchmarks = sorted(full_df["Benchmark"].unique())
    platforms = sorted(full_df["Platform"].unique())
    devices = sorted(full_df["Normalized Device ID"].unique())

    # Render table filters and get selections
    (
        selected_model_table,
        selected_benchmark_table,
        selected_platform_table,
        selected_device_table,
    ) = render_table_filters(models, benchmarks, platforms, devices)

    # Filter data for table. "All" means no restriction on that column;
    # a single loop over column->selection replaces four identical if-blocks.
    table_filters = {
        "Model": selected_model_table,
        "Benchmark": selected_benchmark_table,
        "Platform": selected_platform_table,
        "Normalized Device ID": selected_device_table,
    }
    table_df = full_df.copy()
    for column, selection in table_filters.items():
        if selection != "All":
            table_df = table_df[table_df[column] == selection]

    # Render leaderboard table
    render_leaderboard_table(table_df)

    # Performance plots section
    st.subheader("Performance Comparison")

    # Render plot filters and get selections (exactly one model + benchmark)
    selected_model_plot, selected_benchmark_plot = render_plot_filters(
        models, benchmarks
    )

    # Plots show only the selected (model, benchmark) slice
    plot_df = full_df[
        (full_df["Model"] == selected_model_plot)
        & (full_df["Benchmark"] == selected_benchmark_plot)
    ]

    # Render performance plots
    render_performance_plots(plot_df, selected_model_plot)

# Script entry point: drive the async main() to completion on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())