gremlin97 committed
Commit ed881c4 · 1 Parent(s): 9a43cf1

gremlin97: hf setup

Files changed (9)
  1. .gitignore +44 -0
  2. README.md +93 -13
  3. app/__init__.py +1 -0
  4. app/app.py +132 -0
  5. app/data.py +148 -0
  6. app/leaderboard.py +71 -0
  7. pyproject.toml +37 -0
  8. requirements.txt +4 -0
  9. run.py +11 -0
.gitignore ADDED
@@ -0,0 +1,44 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # Virtual Environment
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+
+ # IDE
+ .idea/
+ .vscode/
+ *.swp
+ *.swo
+
+ # Poetry
+ poetry.lock
+ dist/
+
+ # Logs
+ *.log
+
+ # Local development
+ .DS_Store
README.md CHANGED
@@ -1,13 +1,93 @@
- ---
- title: Mars Board
- emoji: 💻
- colorFrom: indigo
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.23.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Mars Vision Leaderboard
+
+ A comprehensive leaderboard for evaluating computer vision models on Mars-specific datasets. This leaderboard tracks performance across multiple tasks including classification, object detection, and segmentation.
+
+ ## Overview
+
+ This leaderboard provides a standardized evaluation framework for computer vision models on Mars-specific datasets.
+
+ ## Tasks
+
+ 1. **Classification**
+    - DoMars16k - Surface Types
+    - Mars Image - Content Analysis
+    - Deep Mars - Deep Learning
+    - Dusty vs Non-dusty - Dust Analysis
+
+ 2. **Object Detection**
+    - Robins & Hynek - Craters
+    - Lagain - Surface Features
+    - SPOC - Surface Properties
+    - AI4MARS - Surface Analysis
+    - MarsData - General Surface
+
+ 3. **Segmentation**
+    - S5Mars - Surface
+    - Mars-Seg - Features
+    - Martian Landslide
+    - Martian Frost
+
+ ## Getting Started
+
+ 1. Clone the repository:
+    ```bash
+    git clone https://huggingface.co/spaces/gremlin97/mars-vision-leaderboard
+    cd mars-vision-leaderboard
+    ```
+
+ 2. Install dependencies using Poetry:
+    ```bash
+    poetry install
+    ```
+
+ 3. Run the leaderboard:
+    ```bash
+    # From the project root directory
+    poetry run python run.py
+    ```
+
+ The leaderboard will be accessible at `http://localhost:7860` when running locally.
+
+ ## Features
+
+ - Interactive Gradio interface
+ - Filter models by task
+ - Compare performance across datasets
+ - Visualize results with plots
+ - Track best performing models
+ - Detailed results table
+
+ ## Contributing
+
+ To add your model's results to the leaderboard:
+
+ 1. Fork this repository
+ 2. Add your results to the appropriate data dictionary in `app/data.py`
+ 3. Submit a pull request with your changes
+
+ ### Results Format
+
+ For each task, results should be added in the following format:
+
+ ```python
+ TASK_DATA = {
+     "Model": ["Model1", "Model2", ...],
+     "Dataset": ["Dataset1", "Dataset1", ...],
+     "Metric1": [value1, value2, ...],
+     "Metric2": [value1, value2, ...],
+ }
+ ```
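+
+ For example, reporting a hypothetical ConvNeXt-T result on the classification task means appending one entry to each parallel list in `CLASSIFICATION_DATA` (model name and scores below are illustrative only):
+
+ ```python
+ CLASSIFICATION_DATA["Model"].append("ConvNeXt-T")
+ CLASSIFICATION_DATA["Dataset"].append("DoMars16")
+ CLASSIFICATION_DATA["Accuracy"].append(93.0)
+ CLASSIFICATION_DATA["F1-Score"].append(92.2)
+ ```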
+
+ ## Project Structure
+
+ ```
+ mars-vision-leaderboard/
+ ├── app/
+ │   ├── __init__.py
+ │   ├── app.py          # Main Gradio interface
+ │   ├── data.py         # Dataset and model data
+ │   └── leaderboard.py  # Visualization functions
+ ├── run.py              # Application entry point
+ ├── pyproject.toml      # Poetry dependencies
+ └── README.md
+ ```
app/__init__.py ADDED
@@ -0,0 +1 @@
+ """Mars Vision Leaderboard package."""
app/app.py ADDED
@@ -0,0 +1,132 @@
+ """Main Gradio application for the Mars Vision Leaderboard."""
+
+ import gradio as gr
+ from .leaderboard import update_leaderboard
+ from .data import TASK_DATA
+ import pandas as pd
+
+ TASKS = ["Classification", "Object Detection", "Segmentation"]
+
+ def get_available_models(task: str) -> list:
+     """Get list of available models for a task."""
+     data, _ = TASK_DATA[task]
+     return sorted(list(set(data['Model'])))
+
+ def create_interface():
+     """Create the Gradio interface."""
+     with gr.Blocks(title="Mars Vision Leaderboard", theme=gr.themes.Default()) as demo:
+         gr.Markdown(
+             """
+             # Mars Vision Leaderboard
+             A comprehensive evaluation of computer vision models on Mars-specific datasets.
+             This leaderboard tracks performance across multiple tasks including classification,
+             object detection, and segmentation.
+             """
+         )
+
+         with gr.Tabs():
+             with gr.TabItem("📊 General Datasets"):
+                 with gr.Row():
+                     with gr.Column(scale=1):
+                         gr.Markdown("""
+                         #### Surface Analysis
+                         • **Robins & Hynek** - Craters
+                         • **Lagain** - Surface Features
+                         • **SPOC** - Surface Properties
+                         • **AI4MARS** - Surface Analysis
+                         • **MarsData** - General Surface
+                         """)
+                     with gr.Column(scale=1):
+                         gr.Markdown("""
+                         #### Classification
+                         • **DoMars16k** - Surface Types
+                         • **Mars Image** - Content Analysis
+                         • **Deep Mars** - Deep Learning
+                         • **Dusty vs Non-dusty** - Dust Analysis
+                         """)
+                     with gr.Column(scale=1):
+                         gr.Markdown("""
+                         #### Segmentation
+                         • **S5Mars** - Surface
+                         • **Mars-Seg** - Features
+                         • **Martian Landslide**
+                         • **Martian Frost**
+                         """)
+
+             with gr.TabItem("🎯 Specialized Tasks"):
+                 with gr.Row():
+                     with gr.Column(scale=1):
+                         gr.Markdown("""
+                         #### Detection Tasks
+                         • **Change Detection**
+                         • **Outlier Detection**
+                         • **Novelty Detection**
+                         """)
+                     with gr.Column(scale=1):
+                         gr.Markdown("""
+                         #### Feature Analysis
+                         • **Cone Detection**
+                         • **Dust Devil Tracks**
+                         • **Cone Segmentation**
+                         """)
+
+         with gr.Row():
+             task_dropdown = gr.Dropdown(
+                 choices=TASKS,
+                 value=TASKS[0],
+                 label="Select Task",
+             )
+             model_multiselect = gr.Dropdown(
+                 choices=get_available_models(TASKS[0]),
+                 value=None,
+                 label="Filter Models (Optional)",
+                 multiselect=True,
+             )
+
+         with gr.Column():
+             gr.Markdown("### Best Performing Models Across Datasets")
+             best_models_output = gr.Dataframe(
+                 interactive=False,
+                 wrap=True,
+                 headers=["Metric", "Rank", "Model", "Average Score"],
+             )
+
+         gr.Markdown("### Detailed Results")
+         table_output = gr.Dataframe(interactive=False, wrap=True)
+
+         with gr.Row():
+             plot_output1 = gr.Plot(label="Performance Plot 1")
+             plot_output2 = gr.Plot(label="Performance Plot 2")
+
+         def update_models(task):
+             return gr.Dropdown(choices=get_available_models(task))
+
+         def update_with_filters(task, models):
+             return update_leaderboard(task, models)
+
+         # Event handlers
+         task_dropdown.change(
+             fn=update_models,
+             inputs=[task_dropdown],
+             outputs=[model_multiselect],
+         )
+
+         for component in [task_dropdown, model_multiselect]:
+             component.change(
+                 fn=update_with_filters,
+                 inputs=[task_dropdown, model_multiselect],
+                 outputs=[table_output, plot_output1, plot_output2, best_models_output],
+             )
+
+         # Initial update
+         demo.load(
+             fn=update_with_filters,
+             inputs=[task_dropdown, model_multiselect],
+             outputs=[table_output, plot_output1, plot_output2, best_models_output],
+         )
+
+     return demo
+
+ if __name__ == "__main__":
+     demo = create_interface()
+     demo.launch()
app/data.py ADDED
@@ -0,0 +1,148 @@
+ """Leaderboard data for different tasks."""
+
+ CLASSIFICATION_DATA = {
+     "Model": [
+         "ResNet-50",
+         "ViT-Base",
+         "Swin-T",
+         "InceptionV3",
+         "SqueezeNet",
+         "ResNet-50",
+         "ViT-Base",
+         "Swin-T",
+         "InceptionV3",
+         "SqueezeNet",
+         "ResNet-50",
+         "ViT-Base",
+         "Swin-T",
+         "InceptionV3",
+         "SqueezeNet",
+     ],
+     "Dataset": [
+         "DoMars16",
+         "DoMars16",
+         "DoMars16",
+         "DoMars16",
+         "DoMars16",
+         "Atmospheric Dust",
+         "Atmospheric Dust",
+         "Atmospheric Dust",
+         "Atmospheric Dust",
+         "Atmospheric Dust",
+         "Martian Frost",
+         "Martian Frost",
+         "Martian Frost",
+         "Martian Frost",
+         "Martian Frost",
+     ],
+     "Accuracy": [
+         92.5, 94.2, 95.8, 93.1, 89.7,
+         88.3, 90.1, 91.5, 89.8, 87.2,
+         85.6, 87.9, 88.4, 86.7, 84.3,
+     ],
+     "F1-Score": [
+         91.8, 93.5, 94.9, 92.4, 88.6,
+         87.5, 89.2, 90.7, 88.9, 86.3,
+         84.8, 86.9, 87.5, 85.8, 83.4,
+     ],
+ }
+
+ DETECTION_DATA = {
+     "Model": [
+         "Faster R-CNN",
+         "YOLOv5",
+         "DETR",
+         "RetinaNet",
+         "SSD",
+         "Faster R-CNN",
+         "YOLOv5",
+         "DETR",
+         "RetinaNet",
+         "SSD",
+         "Faster R-CNN",
+         "YOLOv5",
+         "DETR",
+         "RetinaNet",
+         "SSD",
+     ],
+     "Dataset": [
+         "Mars Crater",
+         "Mars Crater",
+         "Mars Crater",
+         "Mars Crater",
+         "Mars Crater",
+         "Rover Component",
+         "Rover Component",
+         "Rover Component",
+         "Rover Component",
+         "Rover Component",
+         "Geological Feature",
+         "Geological Feature",
+         "Geological Feature",
+         "Geological Feature",
+         "Geological Feature",
+     ],
+     "mAP": [
+         78.5, 80.2, 82.1, 79.3, 77.8,
+         75.6, 77.3, 78.9, 76.7, 75.1,
+         73.4, 75.1, 76.7, 74.5, 73.0,
+     ],
+     "IoU": [
+         0.72, 0.74, 0.76, 0.73, 0.71,
+         0.69, 0.71, 0.73, 0.70, 0.68,
+         0.67, 0.69, 0.71, 0.68, 0.67,
+     ],
+ }
+
+ SEGMENTATION_DATA = {
+     "Model": [
+         "U-Net",
+         "DeepLabV3+",
+         "Mask R-CNN",
+         "SegFormer",
+         "HRNet",
+         "U-Net",
+         "DeepLabV3+",
+         "Mask R-CNN",
+         "SegFormer",
+         "HRNet",
+         "U-Net",
+         "DeepLabV3+",
+         "Mask R-CNN",
+         "SegFormer",
+         "HRNet",
+     ],
+     "Dataset": [
+         "Mars Terrain",
+         "Mars Terrain",
+         "Mars Terrain",
+         "Mars Terrain",
+         "Mars Terrain",
+         "Dust Storm",
+         "Dust Storm",
+         "Dust Storm",
+         "Dust Storm",
+         "Dust Storm",
+         "Geological Feature",
+         "Geological Feature",
+         "Geological Feature",
+         "Geological Feature",
+         "Geological Feature",
+     ],
+     "Dice Score": [
+         0.85, 0.87, 0.88, 0.86, 0.84,
+         0.82, 0.84, 0.85, 0.83, 0.82,
+         0.81, 0.83, 0.84, 0.82, 0.81,
+     ],
+     "IoU": [
+         0.74, 0.76, 0.78, 0.75, 0.73,
+         0.70, 0.72, 0.74, 0.71, 0.70,
+         0.68, 0.70, 0.72, 0.69, 0.68,
+     ],
+ }
+
+ TASK_DATA = {
+     "Classification": (CLASSIFICATION_DATA, ["Accuracy", "F1-Score"]),
+     "Object Detection": (DETECTION_DATA, ["mAP", "IoU"]),
+     "Segmentation": (SEGMENTATION_DATA, ["Dice Score", "IoU"]),
+ }
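
Each dictionary in `app/data.py` encodes a table as parallel column lists, so a contribution that lengthens one list but not the others silently misaligns every row after it. A minimal consistency check, a sketch one might run from the project root after editing (not part of the app itself):

```python
from app.data import TASK_DATA

# Every column list in a task's data dict must have the same length,
# because rows are assembled positionally into a DataFrame.
for task, (data, metrics) in TASK_DATA.items():
    lengths = {column: len(values) for column, values in data.items()}
    assert len(set(lengths.values())) == 1, f"{task}: ragged columns {lengths}"
    # Each advertised metric must also exist as a column.
    assert all(metric in data for metric in metrics), f"{task}: metric column missing"
print("TASK_DATA is consistent")
```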
app/leaderboard.py ADDED
@@ -0,0 +1,71 @@
+ """Leaderboard visualization functions."""
+
+ import pandas as pd
+ import plotly.express as px
+ from typing import Dict, List, Optional, Tuple
+ from plotly.graph_objects import Figure
+
+ from app.data import TASK_DATA
+
+ def create_leaderboard_table(data: Dict[str, List], selected_models: Optional[List[str]] = None) -> pd.DataFrame:
+     """Create a formatted DataFrame for the leaderboard."""
+     df = pd.DataFrame(data)
+
+     if selected_models:
+         df = df[df['Model'].isin(selected_models)]
+
+     # Format numeric columns to 1 decimal place
+     numeric_cols = df.select_dtypes(include=['float64', 'int64']).columns
+     df[numeric_cols] = df[numeric_cols].apply(lambda x: x.apply(lambda y: f"{y:.1f}"))
+
+     return df
+
+ def create_performance_plot(data: Dict[str, List], task: str, metric: str, selected_models: Optional[List[str]] = None) -> Figure:
+     """Create a bar plot showing model performance."""
+     df = pd.DataFrame(data)
+
+     if selected_models:
+         df = df[df['Model'].isin(selected_models)]
+
+     fig = px.bar(
+         df,
+         x="Model",
+         y=metric,
+         color="Dataset",
+         title=f"{task} - {metric}",
+         barmode="group",
+     )
+     fig.update_layout(
+         xaxis_title="Model",
+         yaxis_title=metric,
+         showlegend=True,
+     )
+     return fig
+
+ def get_best_models(data: Dict[str, List], metrics: List[str]) -> pd.DataFrame:
+     """Get the best performing models for each metric across datasets."""
+     df = pd.DataFrame(data)
+     best_models = []
+
+     for metric in metrics:
+         top_models = df.groupby('Model')[metric].mean().sort_values(ascending=False).head(3)
+         best_models.extend([
+             {
+                 'Metric': metric,
+                 'Rank': rank,
+                 'Model': model,
+                 'Average Score': f"{score:.1f}"
+             }
+             for rank, (model, score) in enumerate(top_models.items(), 1)
+         ])
+
+     return pd.DataFrame(best_models)
+
+ def update_leaderboard(task: str, selected_models: Optional[List[str]] = None) -> Tuple[pd.DataFrame, Figure, Figure, pd.DataFrame]:
+     """Update the leaderboard based on selected task and models."""
+     data, metrics = TASK_DATA[task]
+     return (
+         create_leaderboard_table(data, selected_models),
+         *[create_performance_plot(data, task, metric, selected_models) for metric in metrics],
+         get_best_models(data, metrics)
+     )
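
`update_leaderboard` returns a four-item tuple: the formatted results table, one bar plot per metric (every current task defines exactly two, matching `plot_output1` and `plot_output2` in `app/app.py`), and the best-models table. The module can also be exercised without launching Gradio; a sketch, assuming it is run from the project root:

```python
from app.leaderboard import update_leaderboard

# Classification view, restricted to two models.
table, acc_plot, f1_plot, best = update_leaderboard(
    "Classification", selected_models=["ResNet-50", "ViT-Base"]
)
print(table)     # per-dataset results, scores formatted to one decimal
print(best)      # top-3 models per metric, ranked by mean score
acc_plot.show()  # opens the Accuracy bar chart in a browser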
pyproject.toml ADDED
@@ -0,0 +1,37 @@
+ [tool.poetry]
+ name = "mars-vision-leaderboard"
+ version = "0.1.0"
+ description = "A comprehensive leaderboard for evaluating computer vision models on Mars-specific datasets"
+ authors = ["Your Name <[email protected]>"]
+ readme = "README.md"
+ packages = [{include = "app"}]
+
+ [tool.poetry.dependencies]
+ python = ">=3.9,<3.13"
+ gradio = "^4.19.2"
+ pandas = "^2.2.0"
+ numpy = "^1.26.0"
+ plotly = "^5.18.0"
+
+ [tool.poetry.group.dev.dependencies]
+ black = "^24.1.0"
+ isort = "^5.13.0"
+ flake8 = "^7.0.0"
+ pytest = "^8.0.0"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
+
+ [tool.poetry.scripts]
+ mars-vision-leaderboard = "run:main"
+
+ [tool.black]
+ line-length = 88
+ target-version = ['py39']
+ include = '\.pyi?$'
+
+ [tool.isort]
+ profile = "black"
+ multi_line_output = 3
+ line_length = 88
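
The `[tool.poetry.scripts]` entry points a console command at `main()` in `run.py`, so inside the Poetry environment the app can presumably also be started with `poetry run mars-vision-leaderboard` (assuming `run.py` at the project root is importable, which it is when running from the project directory).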
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio>=4.19.2
+ pandas>=2.2.0
+ numpy>=1.26.0
+ plotly>=5.18.0
run.py ADDED
@@ -0,0 +1,11 @@
+ """Entry point for the Mars Vision Leaderboard application."""
+
+ from app.app import create_interface
+
+ def main():
+     """Run the Mars Vision Leaderboard application."""
+     demo = create_interface()
+     demo.launch()
+
+ if __name__ == "__main__":
+     main()