FredOru committed on
Commit 1bc7a0c · 1 Parent(s): 37c64ff

(feat) db csv and admin panels

Files changed (13)
  1. .gitignore +176 -0
  2. README copy.md +1 -0
  3. app.py +279 -233
  4. arena.py +110 -148
  5. database/estimates.csv +6 -0
  6. database/prompts.csv +6 -0
  7. database/votes.csv +11 -0
  8. db.py +68 -0
  9. poetry.lock +0 -0
  10. pyproject.toml +24 -0
  11. requirements.txt +2 -1
  12. routes.py +7 -0
  13. theme_schema_miku.json +1 -0
.gitignore ADDED
@@ -0,0 +1,176 @@
+ ratings_archive.json
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
README copy.md ADDED
@@ -0,0 +1 @@
+ # prompt-arena
app.py CHANGED
@@ -1,254 +1,300 @@
  import gradio as gr
  import pandas as pd
- import os
- import time
- from threading import Thread
- from arena import PromptArena

  LABEL_A = "Proposition A"
  LABEL_B = "Proposition B"

-
- class PromptArenaApp:
-     """
-     Class that wraps the arena and manages the Gradio interface.
-     """
-
-     def __init__(self, arena: PromptArena) -> None:
-         """
-         Initializes the application and loads the prompts from the CSV file.
-         """
-         self.arena: PromptArena = arena
-
-     def select_and_display_match(self):
-         """
-         Selects a match and displays it.
-
-         Returns:
-             Tuple containing:
-             - The text of the first prompt
-             - The text of the second prompt
-             - A state dictionary containing the prompt IDs
-         """
-
-         try:
-             prompt_a_id, prompt_b_id = self.arena.select_match()
-             prompt_a_text = self.arena.prompts.get(prompt_a_id, "")
-             prompt_b_text = self.arena.prompts.get(prompt_b_id, "")
-
-             state = {"prompt_a_id": prompt_a_id, "prompt_b_id": prompt_b_id}
-
-             return (
-                 prompt_a_text,
-                 prompt_b_text,
-                 state,
-                 gr.update(interactive=True),  # button A
-                 gr.update(interactive=True),  # button B
-                 gr.update(interactive=False),  # match button
-             )
-         except Exception as e:
-             return f"Erreur lors de la sélection d'un match: {str(e)}", "", "", {}
-
-     def record_winner_a(self, state: dict[str, str]):
-         try:
-             prompt_a_id = state["prompt_a_id"]
-             prompt_b_id = state["prompt_b_id"]
-
-             self.arena.record_result(
-                 prompt_a_id, prompt_b_id
-             )  # Update the progress and the rankings
-             progress_info = self.get_progress_info()
-             rankings_table = self.get_rankings_table()
-
-             return (
-                 f"Vous avez choisi : {LABEL_A}",
-                 progress_info,
-                 rankings_table,
-                 gr.update(interactive=False),  # button A
-                 gr.update(interactive=False),  # button B
-                 gr.update(interactive=True),  # match button
              )
-         except Exception as e:
-             return (
-                 f"Erreur lors de l'enregistrement du résultat: {str(e)}",
-                 "",
-                 pd.DataFrame(),
-             )
-
-     def record_winner_b(self, state: dict[str, str]):
-         try:
-             prompt_a_id = state["prompt_a_id"]
-             prompt_b_id = state["prompt_b_id"]
-
-             self.arena.record_result(
-                 prompt_b_id, prompt_a_id
-             )  # Update the progress and the rankings
-             progress_info = self.get_progress_info()
-             rankings_table = self.get_rankings_table()
-
-             return (
-                 f"Vous avez choisi : {LABEL_B}",
-                 progress_info,
-                 rankings_table,
-                 gr.update(interactive=False),  # button A
-                 gr.update(interactive=False),  # button B
-                 gr.update(interactive=True),  # match button
              )
-         except Exception as e:
-             return (
-                 f"Erreur lors de l'enregistrement du résultat: {str(e)}",
-                 "",
-                 pd.DataFrame(),
-             )
-
-     def get_progress_info(self) -> str:
-         """
-         Gets information about the tournament's progress.
-
-         Returns:
-             str: Formatted message containing the progress statistics
-         """
-         if not self.arena:
-             return "Aucune arène initialisée. Veuillez d'abord charger des prompts."
-
-         try:
-             progress = self.arena.get_progress()
-
-             info = f"Prompts: {progress['total_prompts']}\n"
-             info += f"Matchs joués: {progress['total_matches']}\n"
-             info += f"Progression: {progress['progress']:.2f}%\n"
-             info += (
-                 f"Matchs restants estimés: {progress['estimated_remaining_matches']}\n"
-             )
-             info += f"Incertitude moyenne (σ): {progress['avg_sigma']:.4f}"
-
-             return info
-         except Exception as e:
-             return f"Erreur lors de la récupération de la progression: {str(e)}"
-
-     def get_rankings_table(self) -> pd.DataFrame:
-         """
-         Gets the prompt rankings as a table.
-
-         Returns:
-             pd.DataFrame: Ranking table of the prompts
-         """
-         if not self.arena:
-             return pd.DataFrame([{"Erreur": "Aucune arène initialisée"}])
-
-         try:
-             rankings = self.arena.get_rankings()
-
-             df = pd.DataFrame(rankings)
-             df = df[["rank", "prompt_id", "score"]]
-             df = df.rename(
-                 columns={
-                     "rank": "Rang",
-                     "prompt_id": "ID",
-                     "score": "Score",
-                 }
-             )
-
-             return df
-         except Exception as e:
-             return pd.DataFrame([{"Erreur": str(e)}])
-
-     def create_ui(self) -> gr.Blocks:
-         """
-         Creates the Gradio user interface.
-
-         Returns:
-             gr.Blocks: The configured Gradio application
-         """
-
-         with gr.Blocks(title="Prompt Arena", theme=gr.themes.Ocean()) as app:
-             gr.Markdown('<h1 style="text-align:center;">🥊 Prompt Arena 🥊</h1>')
-
              with gr.Row():
-                 select_btn = gr.Button("Lancer un nouveau match", variant="primary")

              with gr.Row():
-                 proposition_a = gr.Textbox(label=LABEL_A, interactive=False)
-                 proposition_b = gr.Textbox(label=LABEL_B, interactive=False)
-
-             with gr.Row():
-                 vote_a_btn = gr.Button("Choisir " + LABEL_A, interactive=False)
-                 vote_b_btn = gr.Button("Choisir " + LABEL_B, interactive=False)
-
              result = gr.Textbox("Résultat", interactive=False)
-             progress_info = gr.Textbox(
-                 label="Progression du concours", interactive=False
-             )
-             rankings_table = gr.DataFrame(label="Classement des prompts")
-             state = gr.State()  # holds the prompt IDs of the current match
-
-             select_btn.click(
-                 self.select_and_display_match,
-                 inputs=[],
-                 outputs=[
-                     proposition_a,
-                     proposition_b,
-                     state,
-                     vote_a_btn,
-                     vote_b_btn,
-                     select_btn,
-                 ],
              )
-             vote_a_btn.click(
-                 self.record_winner_a,
-                 inputs=[state],
-                 outputs=[
-                     result,
-                     progress_info,
-                     rankings_table,
-                     vote_a_btn,
-                     vote_b_btn,
-                     select_btn,
-                 ],
              )
-             vote_b_btn.click(
-                 self.record_winner_b,
-                 inputs=[state],
-                 outputs=[
-                     result,
-                     progress_info,
-                     rankings_table,
-                     vote_a_btn,
-                     vote_b_btn,
-                     select_btn,
-                 ],
              )
-
-             with gr.Row():
-                 prompt_id_box = gr.Textbox(label="ID du prompt", interactive=True)
-                 prompt_text_box = gr.Textbox(label="Texte du prompt", interactive=True)
-                 save_btn = gr.Button("Ajouter le prompt", variant="secondary")
-
-             def save_prompt(prompt_id, prompt_text):
-                 self.arena.add_prompt(prompt_id, prompt_text)
-                 prompts = pd.read_csv("prompts.csv", header=None)
-                 prompts.columns = [
-                     "Texte",
-                 ]  # or adapt to the actual structure
-                 return prompts
-
-             prompts_lists = gr.Dataframe(label="Liste des prompts")
-             save_btn.click(
-                 save_prompt,
-                 inputs=[prompt_id_box, prompt_text_box],
-                 outputs=prompts_lists,
              )

-             gr.Row([progress_info, rankings_table])
-
-             return app


  # Usage example
  if __name__ == "__main__":
-     # load the prompts from the CSV file
-     prompts = pd.read_csv("prompts.csv", header=None).iloc[:, 0].tolist()
-     arena = PromptArena(prompts=prompts)
-     app_instance = PromptArenaApp(arena=arena)
-     app = app_instance.create_ui()
-     app.launch()

  import gradio as gr
  import pandas as pd
+ from arena import Arena
+ import plotly.graph_objs as go
+ import numpy as np

  LABEL_A = "Proposition A"
  LABEL_B = "Proposition B"

+ ARENA = Arena()
+ ARENA.init_estimates()
+
+
+ def select_and_display_match():
+     try:
+         prompt_a, prompt_b = ARENA.select_match()
+         state = {"prompt_a_id": prompt_a["id"], "prompt_b_id": prompt_b["id"]}
+         vote_a_btn_update = gr.update(interactive=True)
+         vote_b_btn_update = gr.update(interactive=True)
+         new_match_btn_update = gr.update(interactive=False)
+         return (
+             prompt_a["text"],
+             prompt_b["text"],
+             state,
+             vote_a_btn_update,
+             vote_b_btn_update,
+             new_match_btn_update,
+         )
+     except Exception as e:
+         return f"Erreur lors de la sélection d'un match: {str(e)}", "", "", {}
+
+
+ def record_winner_a(state):
+     try:
+         prompt_a_id = state["prompt_a_id"]
+         prompt_b_id = state["prompt_b_id"]
+         ARENA.record_result(prompt_a_id, prompt_b_id)
+         progress_info = ARENA.get_progress()
+         rankings_table = ARENA.get_rankings()
+         vote_a_btn_update = gr.update(interactive=False)
+         vote_b_btn_update = gr.update(interactive=False)
+         new_match_btn_update = gr.update(interactive=True)
+         return (
+             f"Vous avez choisi : {LABEL_A}",
+             progress_info,
+             rankings_table,
+             vote_a_btn_update,
+             vote_b_btn_update,
+             new_match_btn_update,
+         )
+     except Exception as e:
+         return (
+             f"Erreur lors de l'enregistrement du résultat: {str(e)}",
+             "",
+             pd.DataFrame(),
+         )
+
+
+ def record_winner_b(state):
+     try:
+         prompt_a_id = state["prompt_a_id"]
+         prompt_b_id = state["prompt_b_id"]
+         ARENA.record_result(prompt_b_id, prompt_a_id)
+         progress_info = ARENA.get_progress()
+         rankings_table = ARENA.get_rankings()
+         vote_a_btn_update = gr.update(interactive=False)
+         vote_b_btn_update = gr.update(interactive=False)
+         new_match_btn_update = gr.update(interactive=True)
+         return (
+             f"Vous avez choisi : {LABEL_B}",
+             progress_info,
+             rankings_table,
+             vote_a_btn_update,
+             vote_b_btn_update,
+             new_match_btn_update,
+         )
+     except Exception as e:
+         return (
+             f"Erreur lors de l'enregistrement du résultat: {str(e)}",
+             "",
+             pd.DataFrame(),
+         )
+
+
+ def update_table(table_name, df):
+     """Updates the given table's CSV file from the edited DataFrame."""
+     ARENA.replace(table_name, df)
+     return None
+
+
+ def admin_visible(request: gr.Request):
+     is_admin = request.username == "admin"
+     return gr.update(visible=is_admin)
+
+
+ def welcome_user(request: gr.Request):
+     return request.username
+
+
+ def plot_estimates_distribution():
+     """Plots one Gaussian per prompt (Plotly), with dotted vertical lines at the means."""
+     estimates = ARENA.load("estimates")
+     prompts = ARENA.load("prompts")
+     if estimates.empty or prompts.empty:
+         fig = go.Figure()
+         fig.add_annotation(
+             text="Aucune estimation disponible", x=0.5, y=0.5, showarrow=False
+         )
+         return fig
+     x = np.linspace(
+         estimates["mu"].min() - 3 * estimates["sigma"].max(),
+         estimates["mu"].max() + 3 * estimates["sigma"].max(),
+         500,
+     )
+     fig = go.Figure()
+     shapes = []
+     # One Gaussian per prompt
+     for _, row in estimates.iterrows():
+         mu = row["mu"]
+         sigma = row["sigma"]
+         prompt_id = row["prompt_id"] if "prompt_id" in row else row["id"]
+         # Look up the prompt name
+         name = str(prompt_id)
+         if "name" in prompts.columns:
+             match = prompts[prompts["id"] == prompt_id]
+             if not match.empty:
+                 name = match.iloc[0]["name"]
+         y = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
+         fig.add_trace(
+             go.Scatter(
+                 x=x,
+                 y=y,
+                 mode="lines",
+                 name=f"{name}",
+                 hovertemplate=f"<b>{name}</b><br>Score (mu): {mu:.2f}<br>Sigma: {sigma:.2f}<extra></extra>",
              )
+         )
+         # Add a dotted vertical line at mu (in gray)
+         shapes.append(
+             dict(
+                 type="line",
+                 x0=mu,
+                 x1=mu,
+                 y0=0,
+                 y1=max(y),
+                 line=dict(
+                     color="gray",
+                     width=2,
+                     dash="dot",
+                 ),
+                 xref="x",
+                 yref="y",
              )
+         )
+     fig.update_layout(
+         title="Distribution gaussienne de chaque prompt",
+         xaxis_title="Score (mu)",
+         yaxis_title="Densité",
+         template="plotly_white",
+         shapes=shapes,
+     )
+     return fig
+
+
+ with gr.Blocks(
+     title="Prompt Arena",
+     # theme=gr.themes.Default.load("theme_schema_miku.json"),
+ ) as demo:
+     state = gr.State()
+
+     with gr.Row():
+         username = gr.Markdown("")
+         gr.Button("Logout", link="/logout", scale=0, min_width=50)
+
+     gr.Markdown(
+         '<h1 style="text-align:center;"> Concours du meilleur Prompt Engineer </h1>'
+     )
+
+     progress_info = gr.Textbox(
+         label="Progression du concours",
+         value=ARENA.get_progress(),
+         interactive=False,
+         lines=2,
+     )
+
+     with gr.Tabs() as tabs:
+         # Matches tab
+         with gr.TabItem("Combats"):
              with gr.Row():
+                 new_match_btn = gr.Button("Lancer un nouveau match", variant="primary")

              with gr.Row():
+                 with gr.Column():
+                     proposition_a = gr.Textbox(label=LABEL_A, interactive=False)
+                     vote_a_btn = gr.Button("Choisir " + LABEL_A, interactive=False)
+                 with gr.Column():
+                     proposition_b = gr.Textbox(label=LABEL_B, interactive=False)
+                     vote_b_btn = gr.Button("Choisir " + LABEL_B, interactive=False)
              result = gr.Textbox("Résultat", interactive=False)
+
+             # with gr.TabItem("Classement"):
+             rankings_table = gr.DataFrame(
+                 label="Classement des prompts",
+                 value=ARENA.get_rankings(),
+                 interactive=True,
              )
+
+         # Admin tab
+         with gr.TabItem("Admin") as admin_tab:
+             with gr.Accordion("Prompts", open=False):
+                 prompts_table = gr.DataFrame(
+                     value=ARENA.load("prompts"),
+                     interactive=True,
+                 )
+             with gr.Accordion("Estimates", open=False):
+                 estimates_table = gr.DataFrame(
+                     label="Estimations",
+                     value=ARENA.load("estimates"),
+                     interactive=True,
+                 )
+             with gr.Accordion("Votes", open=False):
+                 votes_table = gr.DataFrame(
+                     label="Votes",
+                     value=ARENA.load("votes"),
+                     interactive=True,
+                 )
+             gr.Plot(plot_estimates_distribution, label="Distribution des estimations")
+             prompts_table.change(
+                 update_table,
+                 inputs=[gr.Markdown("prompts", visible=False), prompts_table],
+                 outputs=None,
              )
+             estimates_table.change(
+                 update_table,
+                 inputs=[gr.Markdown("estimates", visible=False), estimates_table],
+                 outputs=None,
              )
+             votes_table.change(
+                 update_table,
+                 inputs=[gr.Markdown("votes", visible=False), votes_table],
+                 outputs=None,
              )

+     new_match_btn.click(
+         select_and_display_match,
+         inputs=[],
+         outputs=[
+             proposition_a,
+             proposition_b,
+             state,
+             vote_a_btn,
+             vote_b_btn,
+             new_match_btn,
+         ],
+     )
+
+     # Callbacks for the two tabs
+     vote_a_btn.click(
+         record_winner_a,
+         inputs=[state],
+         outputs=[
+             result,
+             progress_info,
+             rankings_table,
+             vote_a_btn,
+             vote_b_btn,
+             new_match_btn,
+         ],
+     )
+     vote_b_btn.click(
+         record_winner_b,
+         inputs=[state],
+         outputs=[
+             result,
+             progress_info,
+             rankings_table,
+             vote_a_btn,
+             vote_b_btn,
+             new_match_btn,
+         ],
+     )
+
+     demo.load(admin_visible, None, admin_tab)
+     demo.load(welcome_user, None, username)
+
+
+ def arena_auth(username, password):
+     if username == "admin":
+         return (
+             password == "fred"
+         )  # TODO: move the password to an environment variable
+     else:
+         return username == password


  # Usage example
  if __name__ == "__main__":
+     demo.launch(
+         auth_message="Connexion à l'arène des prompts", auth=arena_auth
+     )  # add share=True to share the interface
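
The auth function above ships with a hardcoded admin password and a TODO to move it into an environment variable. A minimal sketch of that TODO, assuming a hypothetical ADMIN_PASSWORD variable (not part of this commit):

import os

def arena_auth(username: str, password: str) -> bool:
    # Hypothetical: read the admin password from the environment,
    # e.g. export ADMIN_PASSWORD=... before launching the app.
    if username == "admin":
        return password == os.environ.get("ADMIN_PASSWORD", "")
    # Non-admin users still log in with username == password, as in the commit.
    return username == password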
 
 
 
arena.py CHANGED
@@ -1,151 +1,129 @@
- import trueskill
  import random
- import json
- import os
  import datetime
  from typing import Dict, List, Tuple, Union
  import pandas as pd


- class PromptArena:
      """
      An arena for comparing and ranking prompts using the TrueSkill algorithm.
-
-     This class organizes "matches" between prompts where users pick their
-     favorite, updating the TrueSkill rankings based on the results.
      """

-     def __init__(
-         self, prompts: List[str], results_file: str = "ratings_archive.json"
-     ) -> None:
          """
-         Initializes a prompt arena.
-
-         Args:
-             prompts: List of prompt texts
-             results_file: Path of the file used to save/load the ratings
          """
-         self.prompts: Dict[str, str] = {
-             str(idx + 1): prompt for idx, prompt in enumerate(prompts)
-         }
-         self.ratings: Dict[str, trueskill.Rating] = {}  # {prompt_id: trueskill.Rating}
-         self.results_file: str = results_file
-         self.match_history: List[Dict[str, str]] = []
-
-         # Load the ratings if the file exists
-         self._load_ratings()
-
-         # Initialize ratings for the new prompts
-         for prompt_id in self.prompts:
-             if prompt_id not in self.ratings:
-                 self.ratings[prompt_id] = trueskill.Rating()
-
-     def _load_ratings(self) -> None:
-         """Loads the ratings from a JSON file if available"""
-         if os.path.exists(self.results_file):
-             with open(self.results_file, "r", encoding="utf-8") as f:
-                 data = json.load(f)
-
-             # Convert the stored data into trueskill.Rating objects
-             for prompt_id, rating_data in data["ratings"].items():
-                 self.ratings[prompt_id] = trueskill.Rating(
-                     mu=rating_data["mu"], sigma=rating_data["sigma"]
-                 )
-
-             self.match_history = data.get("match_history", [])
-
-     def _save_ratings(self) -> None:
-         """Saves the ratings and the history to a JSON file"""
-         data = {
-             "ratings": {
-                 prompt_id: {"mu": rating.mu, "sigma": rating.sigma}
-                 for prompt_id, rating in self.ratings.items()
-             },
-             "match_history": self.match_history,
-         }
-
-         with open(self.results_file, "w", encoding="utf-8") as f:
-             json.dump(data, f, ensure_ascii=False, indent=2)
-
-     def add_prompt(self, prompt_id: str, prompt_text: str) -> None:
          """
-         Adds a new prompt to the arena.

-         Args:
-             prompt_id: Unique identifier of the prompt
-             prompt_text: Text of the prompt
          """
-         self.prompts[prompt_id] = prompt_text
-         prompts = pd.read_csv("prompts.csv", header=None)
-         prompts.loc[len(prompts)] = [prompt_text]
-         prompts.to_csv("prompts.csv", index=False, header=False)
-         if prompt_id not in self.ratings:
-             self.ratings[prompt_id] = trueskill.Rating()
-         self._save_ratings()
-
-     def select_match(self) -> Tuple[str, str]:
          """
-         Selects two prompts for a match, favoring those with high uncertainty.
-
-         The strategy is to first select the prompt with the highest uncertainty (sigma),
-         then find an opponent with a similar skill level (mu).
          Returns:
              A tuple containing the IDs of the two prompts to compare (prompt_a, prompt_b)
          """
-         # Strategy: pick prompts with high sigma and similar skill levels
-         prompt_ids = list(self.prompts.keys())
-
-         # Sort by decreasing uncertainty (sigma)
-         prompt_ids.sort(key=lambda pid: self.ratings[pid].sigma, reverse=True)
-
-         # Select the first prompt (highest uncertainty)
-         prompt_a = prompt_ids[0]
-
-         # For the second, find a prompt close in skill (mu)
-         mu_a = self.ratings[prompt_a].mu

-         # Sort the remaining prompts by proximity of mu
-         remaining_prompts = [p for p in prompt_ids if p != prompt_a]
-         remaining_prompts.sort(key=lambda pid: abs(self.ratings[pid].mu - mu_a))

-         # Take a prompt among the 3 closest (with a bit of randomization)
-         top_n = min(3, len(remaining_prompts))
-         prompt_b = random.choice(remaining_prompts[:top_n])

          return prompt_a, prompt_b

      def record_result(self, winner_id: str, loser_id: str) -> None:
-         """
-         Records the result of a match and updates the ratings.
-
-         Args:
-             winner_id: ID of the winning prompt
-             loser_id: ID of the losing prompt
-         """
          # Get the current ratings
-         winner_rating = self.ratings[winner_id]
-         loser_rating = self.ratings[loser_id]

-         # Update the ratings (TrueSkill handles the math)
-         self.ratings[winner_id], self.ratings[loser_id] = trueskill.rate_1vs1(
-             winner_rating, loser_rating
          )

-         # Record the match in the history
-         self.match_history.append(
              {
-                 "winner": winner_id,
-                 "loser": loser_id,
-                 "timestamp": str(datetime.datetime.now()),
-             }
          )

-         # Save the results
-         self._save_ratings()

-     def get_rankings(self) -> List[Dict[str, Union[int, str, float]]]:
          """
          Gets the current ranking of the prompts.

@@ -153,29 +131,16 @@ class PromptArena:
          List of dictionaries containing each prompt's ranking with
          its information (rank, id, text, mu, sigma, score)
          """
-         # Sort the prompts by "conserved expected score" = mu - 3*sigma
-         # (a conservative way to estimate skill while accounting for uncertainty)
-         sorted_prompts = sorted(
-             self.ratings.items(), key=lambda x: x[1].mu - 3 * x[1].sigma, reverse=True
          )

-         rankings = []
-         for i, (prompt_id, rating) in enumerate(sorted_prompts, 1):
-             prompt_text = self.prompts.get(prompt_id, "Prompt inconnu")
-             rankings.append(
-                 {
-                     "rank": i,
-                     "prompt_id": prompt_id,
-                     "prompt": prompt_text,
-                     "mu": rating.mu,
-                     "sigma": rating.sigma,
-                     "score": rating.mu - 3 * rating.sigma,  # Conservative score
-                 }
-             )
-
-         return rankings
-
-     def get_progress(self) -> Dict[str, Union[int, float]]:
          """
          Returns statistics on the tournament's progress.

@@ -187,22 +152,19 @@ class PromptArena:
          - progress: estimated percentage of tournament completion
          - estimated_remaining_matches: estimate of the number of matches remaining
          """
-         total_prompts = len(self.prompts)
-         total_matches = len(self.match_history)

-         avg_sigma = sum(r.sigma for r in self.ratings.values()) / max(
-             1, len(self.ratings)
-         )

          # Estimate what percentage of the tournament is complete
          # Based on the average reduction of sigma relative to its initial value
-         initial_sigma = trueskill.Rating().sigma
          progress = min(100, max(0, (1 - avg_sigma / initial_sigma) * 100))

-         return {
-             "total_prompts": total_prompts,
-             "total_matches": total_matches,
-             "avg_sigma": avg_sigma,
-             "progress": progress,
-             "estimated_remaining_matches": int(total_prompts * 15) - total_matches,
-         }

+ import trueskill as ts
+ import pandas as pd
  import random
  import datetime
  from typing import Dict, List, Tuple, Union
+ import db
  import pandas as pd
+ from typing import TypedDict
+
+ MU_init = ts.Rating().mu
+ SIGMA_init = ts.Rating().sigma
+
+
+ class Prompt(TypedDict):
+     id: int
+     name: str
+     text: str


+ class Arena:
      """
      An arena for comparing and ranking prompts using the TrueSkill algorithm.
      """

+     def init_estimates(self, reboot=True) -> None:
          """
+         Initializes the prompt estimates with default TrueSkill ratings.
+         reboot: if the estimates.csv file already exists, it is left as is.
          """
+         estimates = db.load("estimates")
+         if not estimates.empty and reboot:
+             return None
+         if estimates.empty:
+             for i in db.load("prompts")["id"].to_list():
+                 db.insert(
+                     "estimates",
+                     {
+                         "prompt_id": i,
+                         "mu": MU_init,
+                         "sigma": SIGMA_init,
+                     },
+                 )
+
+     def load(self, table_name: str) -> pd.DataFrame:
+         """
+         Backend function for the UI.
+         Loads a table's data from the CSV file.
          """
+         return db.load(table_name)

+     def replace(self, table_name: str, df: pd.DataFrame) -> pd.DataFrame:
          """
+         Backend function for the UI.
+         Replaces a table's contents with the provided DataFrame.
+         For the admin only.
          """
+         return db.replace(table_name, df)

+     def select_match(self) -> Tuple[Prompt, Prompt]:
+         """
+         Selects two prompts for a match, favoring those with high uncertainty.
          Returns:
              A tuple containing the IDs of the two prompts to compare (prompt_a, prompt_b)
          """
+         # the most uncertain prompt (highest sigma)
+         estimates = db.load("estimates")
+         estimate_a = estimates.sort_values(by="sigma", ascending=False).iloc[0]
+
+         # the prompt closest in skill (mu) to prompt_a
+         estimate_b = (
+             estimates.loc[estimates["prompt_id"] != estimate_a["prompt_id"]]
+             .assign(delta_mu=lambda df_: abs(df_["mu"] - estimate_a["mu"]))
+             .sort_values(by="delta_mu", ascending=True)
+             .iloc[0]
+         )

+         prompts = db.load("prompts")
+         prompt_a = prompts.query(f"id == {estimate_a['prompt_id']}").iloc[0].to_dict()
+         prompt_b = prompts.query(f"id == {estimate_b['prompt_id']}").iloc[0].to_dict()
+         # We need to update the selection strategy to prefer prompts with high uncertainty
+         # but also consider prompts that are close in ranking (within 5 positions)

+         # Create pairs of prompts that are at most 5 positions apart in the ranking
+         # close_pairs = []
+         # for i in range(len(prompt_ids)):
+         #     for j in range(i + 1, min(i + 6, len(prompt_ids))):
+         #         close_pairs.append((prompt_ids[i], prompt_ids[j]))

          return prompt_a, prompt_b

      def record_result(self, winner_id: str, loser_id: str) -> None:
          # Get the current ratings
+         estimates = db.load("estimates")
+         winner_estimate = (
+             estimates[estimates["prompt_id"] == winner_id].iloc[0].to_dict()
+         )
+         loser_estimate = estimates[estimates["prompt_id"] == loser_id].iloc[0].to_dict()

+         winner_rating = ts.Rating(winner_estimate["mu"], winner_estimate["sigma"])
+         loser_rating = ts.Rating(loser_estimate["mu"], loser_estimate["sigma"])
+
+         winner_new_rating, loser_new_rating = ts.rate_1vs1(winner_rating, loser_rating)
+
+         db.update(
+             "estimates",
+             winner_estimate["id"],
+             {"mu": winner_new_rating.mu, "sigma": winner_new_rating.sigma},
+         )
+         db.update(
+             "estimates",
+             loser_estimate["id"],
+             {"mu": loser_new_rating.mu, "sigma": loser_new_rating.sigma},
          )

+         db.insert(
+             "votes",
              {
+                 "winner_id": winner_id,
+                 "loser_id": loser_id,
+                 # "timestamp": datetime.datetime.now().isoformat(),
+             },
          )

+         return None

+     def get_rankings(self) -> pd.DataFrame:
          """
          Gets the current ranking of the prompts.

          List of dictionaries containing each prompt's ranking with
          its information (rank, id, text, mu, sigma, score)
          """
+
+         prompts = db.load("prompts")
+         estimates = db.load("estimates").drop(columns=["id"])
+         rankings = prompts.merge(estimates, left_on="id", right_on="prompt_id").drop(
+             columns=["id", "prompt_id"]
          )
+         return rankings.sort_values(by="mu", ascending=False)
+         # possibly display mu - 3*sigma instead, to be conservative

+     def get_progress(self) -> str:
          """
          Returns statistics on the tournament's progress.

          - progress: estimated percentage of tournament completion
          - estimated_remaining_matches: estimate of the number of matches remaining
          """
+         prompts = db.load("prompts")
+         estimates = db.load("estimates")
+         votes = db.load("votes")

+         avg_sigma = estimates["sigma"].mean()

          # Estimate what percentage of the tournament is complete
          # Based on the average reduction of sigma relative to its initial value
+         initial_sigma = ts.Rating().sigma
          progress = min(100, max(0, (1 - avg_sigma / initial_sigma) * 100))

+         msg = f"""{len(prompts)} propositions à départager
+ {len(votes)} matchs joués
+ {avg_sigma:.2f} d'incertitude moyenne"""
+
+         return msg
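
For intuition, the rating update that record_result delegates to TrueSkill can be exercised on its own. A minimal sketch using the trueskill package's default rating (mu=25, sigma=25/3), the same values as the MU_init and SIGMA_init constants above:

import trueskill as ts

# Two fresh ratings at the defaults (mu=25.0, sigma≈8.33)
winner, loser = ts.Rating(), ts.Rating()
new_winner, new_loser = ts.rate_1vs1(winner, loser)

print(new_winner.mu > 25, new_loser.mu < 25)  # True True: the means move apart
print(new_winner.sigma < winner.sigma)        # True: uncertainty shrinks after a match

Because every match shrinks sigma, the 1 - avg_sigma / initial_sigma term in get_progress climbs toward 1 as the tournament converges; with the sigmas in database/estimates.csv (average ≈ 3.27 against an initial 8.33), it reports roughly 60% progress.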
 
 
database/estimates.csv ADDED
@@ -0,0 +1,6 @@
+ id,prompt_id,mu,sigma
+ 1,1,40.23772508094016,2
+ 2,2,26.46775376631428,4
+ 3,3,22.084687989982868,3.547977413152066
+ 4,4,24.390885439458472,3.2275080660341553
+ 5,5,9.29883828658184,3.5631194000412036
database/prompts.csv ADDED
@@ -0,0 +1,6 @@
+ id,name,text
+ 1,A,Prompt A 100
+ 2,B,Prompt B 90
+ 3,C,Prompt C 60
+ 4,D,Prompt D 55
+ 5,E,Prompt E 30
database/votes.csv ADDED
@@ -0,0 +1,11 @@
+ id,winner_id,loser_id
+ 1,4,5
+ 2,1,2
+ 3,2,3
+ 4,4,5
+ 5,1,2
+ 6,4,5
+ 7,1,2
+ 8,4,3
+ 9,3,5
+ 10,3,5
db.py ADDED
@@ -0,0 +1,68 @@
+ import pandas as pd
+
+
+ """
+ Small database for managing prompts, estimates, and votes in a simple arena-like application.
+ The data is stored in CSV files.
+ Always refer to the file, never to the in-memory variable.
+ Memory is local to each user; to pick up other users' updates, reload the data from the file.
+ """
+
+ DATABASE = {
+     "prompts": {
+         "filename": "database/prompts.csv",
+         "columns": ["id", "name", "text"],
+     },
+     "estimates": {
+         "filename": "database/estimates.csv",
+         "columns": ["id", "prompt_id", "mu", "sigma"],
+     },
+     "votes": {
+         "filename": "database/votes.csv",
+         "columns": ["id", "winner_id", "loser_id"],
+     },
+ }
+
+
+ def load(table_name: str) -> pd.DataFrame:
+     try:
+         result = pd.read_csv(DATABASE[table_name]["filename"])
+     except FileNotFoundError:
+         result = pd.DataFrame(columns=DATABASE[table_name]["columns"])
+     return result
+
+
+ def insert(table_name: str, data: dict) -> None:
+     """Inserts data into the specified table.
+     The id is generated automatically.
+     Example:
+         db.insert("prompts", {"name": "Test", "text": "This is a test prompt."})
+     """
+     df = load(table_name)
+     data["id"] = 1 if df.empty else df["id"].max() + 1
+     df = pd.concat([df, pd.DataFrame([data])], ignore_index=True)
+     df.to_csv(DATABASE[table_name]["filename"], index=False)
+
+
+ def update(table_name: str, row_id: int, data: dict) -> None:
+     """
+     Updates a row of the specified table by its id.
+     Example:
+         db.update("estimates", 3, {"mu": 25.0, "sigma": 8.3})
+     """
+     df = load(table_name)
+     idx = df.index[df["id"] == row_id]
+     if not idx.empty:
+         for key, value in data.items():
+             df.loc[idx, key] = value
+         df.to_csv(DATABASE[table_name]["filename"], index=False)
+
+
+ def replace(table_name: str, df: pd.DataFrame) -> None:
+     """
+     Replaces the entire contents of the specified table with the provided DataFrame.
+     Example:
+         db.replace("prompts", df)
+     """
+     df.to_csv(DATABASE[table_name]["filename"], index=False)
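
A minimal usage sketch of these helpers (the row values are made up for illustration):

import db

# Insert: the id is generated automatically (max existing id + 1)
db.insert("prompts", {"name": "F", "text": "Prompt F"})

# Always reload from disk, per the module docstring, to pick up other users' writes
prompts = db.load("prompts")
new_id = int(prompts["id"].max())

# Update one row by its id
db.update("prompts", new_id, {"text": "Prompt F (edited)"})

# replace() rewrites the whole CSV from an edited DataFrame (the admin panels use this)
db.replace("prompts", db.load("prompts"))

Worth noting: each call is a full read-modify-write of a shared CSV with no locking, so two simultaneous votes can overwrite each other's updates.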
poetry.lock CHANGED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,24 @@
+ [project]
+ name = "prompt-arena"
+ version = "0.1.0"
+ description = ""
+ authors = [
+     {name = "Fred-Oru", email = "[email protected]"}
+ ]
+ readme = "README.md"
+ requires-python = ">=3.12,<4.0"
+ dependencies = [
+     "trueskill (>=0.4.5,<0.5.0)",
+     "gradio (>=4.0.0)",
+     "pandas (>=2.0.0)",
+     "plotly (>=6.1.2,<7.0.0)",
+ ]
+
+
+ [build-system]
+ requires = ["poetry-core>=2.0.0,<3.0.0"]
+ build-backend = "poetry.core.masonry.api"
+
+ [tool.poetry.group.dev.dependencies]
+ ipykernel = "^6.29.5"
+
requirements.txt CHANGED
@@ -1,3 +1,4 @@
  trueskill>=0.4.5,<0.5.0
  gradio>=4.0.0
- pandas>=2.0.0
+ pandas>=2.0.0
+ plotly>=6.1.2,<7.0.0
routes.py ADDED
@@ -0,0 +1,7 @@
+ @app.get("/logout")
+ def logout():
+     response = RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
+     response.delete_cookie(key="access-token")
+     response.delete_cookie(key="access-token-unsecure")
+     print("Logout user!")
+     return response
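
As committed, routes.py is a fragment: app, RedirectResponse, and status are undefined in this file. A sketch of the imports it presumably relies on, assuming app is the underlying FastAPI application that serves the Gradio UI (not shown in this commit):

from fastapi import FastAPI, status
from fastapi.responses import RedirectResponse

app = FastAPI()  # assumption: in practice, the FastAPI instance behind the Gradio app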
theme_schema_miku.json ADDED
@@ -0,0 +1 @@
+ {"theme": {"_font": [{"__gradio_font__": true, "name": "Quicksand", "class": "google"}, {"__gradio_font__": true, "name": "ui-sans-serif", "class": "font"}, {"__gradio_font__": true, "name": "system-ui", "class": "font"}, {"__gradio_font__": true, "name": "sans-serif", "class": "font"}], "_font_mono": [{"__gradio_font__": true, "name": "IBM Plex Mono", "class": "google"}, {"__gradio_font__": true, "name": "ui-monospace", "class": "font"}, {"__gradio_font__": true, "name": "Consolas", "class": "font"}, {"__gradio_font__": true, "name": "monospace", "class": "font"}], "_stylesheets": ["https://fonts.googleapis.com/css2?family=Quicksand:wght@400;600&display=swap", "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"], "background_fill_primary": "#FFFFFF", "background_fill_primary_dark": "#000000", "background_fill_secondary": "#dce3e8", "background_fill_secondary_dark": "#242424", "block_background_fill": "#ECF2F7", "block_background_fill_dark": "#191919", "block_border_color": "#dce3e8", "block_border_color_dark": "#242424", "block_border_width": "1px", "block_info_text_color": "#191919", "block_info_text_color_dark": "#ECF2F7", "block_info_text_size": "*text_sm", "block_info_text_weight": "400", "block_label_background_fill": "#ECF2F700", "block_label_background_fill_dark": "#19191900", "block_label_border_color": "#dce3e8", "block_label_border_color_dark": "#242424", "block_label_border_width": "1px", "block_label_margin": "0", "block_label_padding": "*spacing_sm *spacing_lg", "block_label_radius": "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", "block_label_right_radius": "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", "block_label_shadow": "*block_shadow", "block_label_text_color": "#4EACEF", "block_label_text_color_dark": "#4EACEF", "block_label_text_size": "*text_sm", "block_label_text_weight": "400", "block_padding": "*spacing_xl calc(*spacing_xl + 2px)", "block_radius": "*radius_lg", "block_shadow": "#FFFFFF00", "block_shadow_dark": "#00000000", "block_title_background_fill": "#ECF2F700", "block_title_background_fill_dark": "#19191900", "block_title_border_color": "#dce3e8", "block_title_border_color_dark": "#242424", "block_title_border_width": "0px", "block_title_padding": "0", "block_title_radius": "none", "block_title_text_color": "#4EACEF", "block_title_text_color_dark": "#4EACEF", "block_title_text_size": "*text_md", "block_title_text_weight": "bold", "body_background_fill": "#FFFFFF", "body_background_fill_dark": "#000000", "body_text_color": "#191919", "body_text_color_dark": "#ECF2F7", "body_text_color_subdued": "#636668", "body_text_color_subdued_dark": "#c4c4c4", "body_text_size": "*text_md", "body_text_weight": "400", "border_color_accent": "#dce3e8", "border_color_accent_dark": "#242424", "border_color_accent_subdued": "#dce3e867", "border_color_accent_subdued_dark": "#24242467", "border_color_primary": "#dce3e8", "border_color_primary_dark": "#242424", "button_border_width": "*input_border_width", "button_border_width_dark": "*input_border_width", "button_cancel_background_fill": "#dce3e8", "button_cancel_background_fill_dark": "#242424", "button_cancel_background_fill_hover": "#d0d7db", "button_cancel_background_fill_hover_dark": "#202020", "button_cancel_border_color": "#191919", "button_cancel_border_color_dark": "#ECF2F7", "button_cancel_border_color_hover": "#202020", "button_cancel_border_color_hover_dark": "#a1c3d8", "button_cancel_text_color": "#4EACEF", "button_cancel_text_color_dark": "#4EACEF", 
"button_cancel_text_color_hover": "#0c6ebd", "button_cancel_text_color_hover_dark": "#0c6ebd", "button_large_padding": "*spacing_lg calc(2 * *spacing_lg)", "button_large_radius": "*radius_lg", "button_large_text_size": "*text_lg", "button_large_text_weight": "600", "button_primary_background_fill": "#4EACEF", "button_primary_background_fill_dark": "#4EACEF", "button_primary_background_fill_hover": "#0c6ebd", "button_primary_background_fill_hover_dark": "#0c6ebd", "button_primary_border_color": "#191919", "button_primary_border_color_dark": "#ECF2F7", "button_primary_border_color_hover": "#202020", "button_primary_border_color_hover_dark": "#a1c3d8", "button_primary_text_color": "#ECF2F7", "button_primary_text_color_dark": "#191919", "button_primary_text_color_hover": "#e1eaf0", "button_primary_text_color_hover_dark": "#141414", "button_secondary_background_fill": "#dce3e8", "button_secondary_background_fill_dark": "#242424", "button_secondary_background_fill_hover": "#d0d7db", "button_secondary_background_fill_hover_dark": "#202020", "button_secondary_border_color": "#dce3e8", "button_secondary_border_color_dark": "#242424", "button_secondary_border_color_hover": "#d0d7db", "button_secondary_border_color_hover_dark": "#202020", "button_secondary_text_color": "#4EACEF", "button_secondary_text_color_dark": "#4EACEF", "button_secondary_text_color_hover": "#0c6ebd", "button_secondary_text_color_hover_dark": "#0c6ebd", "button_shadow": "none", "button_shadow_active": "none", "button_shadow_hover": "none", "button_small_padding": "*spacing_sm calc(2 * *spacing_sm)", "button_small_radius": "*radius_lg", "button_small_text_size": "*text_md", "button_small_text_weight": "400", "button_transition": "background-color 0.2s ease", "chatbot_code_background_color": "#FFFFFF", "chatbot_code_background_color_dark": "#000000", "checkbox_background_color": "#dce3e8", "checkbox_background_color_dark": "#242424", "checkbox_background_color_focus": "#dce3e8", "checkbox_background_color_focus_dark": "#242424", "checkbox_background_color_hover": "#dce3e8", "checkbox_background_color_hover_dark": "#242424", "checkbox_background_color_selected": "#4EACEF", "checkbox_background_color_selected_dark": "#4EACEF", "checkbox_border_color": "#dce3e8", "checkbox_border_color_dark": "#242424", "checkbox_border_color_focus": "#4EACEF", "checkbox_border_color_focus_dark": "#4EACEF", "checkbox_border_color_hover": "#4EACEF", "checkbox_border_color_hover_dark": "#4EACEF", "checkbox_border_color_selected": "#4EACEF", "checkbox_border_color_selected_dark": "#4EACEF", "checkbox_border_radius": "*radius_sm", "checkbox_border_width": "1px", "checkbox_border_width_dark": "1px", "checkbox_check": "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")", "checkbox_label_background_fill": "#ECF2F7", "checkbox_label_background_fill_dark": "#191919", "checkbox_label_background_fill_hover": "#dce3e8", "checkbox_label_background_fill_hover_dark": "#242424", "checkbox_label_background_fill_selected": "#dce3e8", "checkbox_label_background_fill_selected_dark": "#242424", "checkbox_label_border_color": "#dce3e8", "checkbox_label_border_color_dark": "#242424", "checkbox_label_border_color_hover": "#4EACEF", "checkbox_label_border_color_hover_dark": "#4EACEF", "checkbox_label_border_width": "1px", "checkbox_label_border_width_dark": "1px", 
"checkbox_label_gap": "*spacing_lg", "checkbox_label_padding": "*spacing_md calc(2 * *spacing_md)", "checkbox_label_shadow": "none", "checkbox_label_text_color": "#191919", "checkbox_label_text_color_dark": "#ECF2F7", "checkbox_label_text_color_selected": "#4EACEF", "checkbox_label_text_color_selected_dark": "#4EACEF", "checkbox_label_text_size": "*text_md", "checkbox_label_text_weight": "400", "checkbox_shadow": "*input_shadow", "color_accent": "*primary_500", "color_accent_soft": "#dce3e8", "color_accent_soft_dark": "#242424", "container_radius": "*radius_lg", "embed_radius": "*radius_lg", "error_background_fill": "#dce3e8", "error_background_fill_dark": "#242424", "error_border_color": "#191919", "error_border_color_dark": "#ECF2F7", "error_border_width": "1px", "error_border_width_dark": "1px", "error_icon_color": "#b91c1c", "error_icon_color_dark": "#ef4444", "error_text_color": "#4EACEF", "error_text_color_dark": "#4EACEF", "font": "'Asap', 'ui-sans-serif', sans-serif", "font_mono": "'Fira Code', 'ui-monospace', monospace", "form_gap_width": "0px", "input_background_fill": "#dce3e8", "input_background_fill_dark": "#242424", "input_background_fill_focus": "#dce3e8", "input_background_fill_focus_dark": "#242424", "input_background_fill_hover": "#d0d7db", "input_background_fill_hover_dark": "#202020", "input_border_color": "#191919", "input_border_color_dark": "#ECF2F7", "input_border_color_focus": "#191919", "input_border_color_focus_dark": "#ECF2F7", "input_border_color_hover": "#202020", "input_border_color_hover_dark": "#a1c3d8", "input_border_width": "0px", "input_padding": "*spacing_xl", "input_placeholder_color": "#19191930", "input_placeholder_color_dark": "#ECF2F730", "input_radius": "*radius_lg", "input_shadow": "#19191900", "input_shadow_dark": "#ECF2F700", "input_shadow_focus": "#19191900", "input_shadow_focus_dark": "#ECF2F700", "input_text_size": "*text_md", "input_text_weight": "400", "layout_gap": "*spacing_xxl", "link_text_color": "#4EACEF", "link_text_color_active": "#4EACEF", "link_text_color_active_dark": "#4EACEF", "link_text_color_dark": "#4EACEF", "link_text_color_hover": "#0c6ebd", "link_text_color_hover_dark": "#0c6ebd", "link_text_color_visited": "#4EACEF", "link_text_color_visited_dark": "#4EACEF", "loader_color": "#4EACEF", "loader_color_dark": "#4EACEF", "name": "base", "neutral_100": "#e2effc", "neutral_200": "#bedff9", "neutral_300": "#84c5f5", "neutral_400": "#4eacef", "neutral_50": "#f1f8fe", "neutral_500": "#198cde", "neutral_600": "#0c6ebd", "neutral_700": "#0b5899", "neutral_800": "#0e4b7e", "neutral_900": "#113f69", "neutral_950": "#0b2846", "panel_background_fill": "#ECF2F7", "panel_background_fill_dark": "#191919", "panel_border_color": "#4EACEF", "panel_border_color_dark": "#4EACEF", "panel_border_width": "0", "primary_100": "#e2effc", "primary_200": "#bedff9", "primary_300": "#84c5f5", "primary_400": "#4eacef", "primary_50": "#f1f8fe", "primary_500": "#198cde", "primary_600": "#0c6ebd", "primary_700": "#0b5899", "primary_800": "#0e4b7e", "primary_900": "#113f69", "primary_950": "#0b2846", "prose_header_text_weight": "600", "prose_text_size": "*text_md", "prose_text_weight": "400", "radio_circle": "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")", "radius_lg": "8px", "radius_md": "6px", "radius_sm": "4px", "radius_xl": "12px", "radius_xs": "2px", "radius_xxl": "22px", "radius_xxs": "1px", "secondary_100": "#e2effc", "secondary_200": "#bedff9", 
"secondary_300": "#84c5f5", "secondary_400": "#4eacef", "secondary_50": "#f1f8fe", "secondary_500": "#198cde", "secondary_600": "#0c6ebd", "secondary_700": "#0b5899", "secondary_800": "#0e4b7e", "secondary_900": "#113f69", "secondary_950": "#0b2846", "section_header_text_size": "*text_md", "section_header_text_weight": "400", "shadow_drop": "rgba(0,0,0,0.05) 0px 1px 2px 0px", "shadow_drop_lg": "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", "shadow_inset": "rgba(0,0,0,0.05) 0px 2px 4px 0px inset", "shadow_spread": "#FFFFFF", "shadow_spread_dark": "#000000", "slider_color": "#4EACEF", "slider_color_dark": "#4EACEF", "spacing_lg": "8px", "spacing_md": "6px", "spacing_sm": "4px", "spacing_xl": "10px", "spacing_xs": "2px", "spacing_xxl": "16px", "spacing_xxs": "1px", "stat_background_fill": "#4EACEF", "stat_background_fill_dark": "#4EACEF", "table_border_color": "#191919", "table_border_color_dark": "#ECF2F7", "table_even_background_fill": "#ECF2F7", "table_even_background_fill_dark": "#191919", "table_odd_background_fill": "#dce3e8", "table_odd_background_fill_dark": "#242424", "table_radius": "*radius_lg", "table_row_focus": "#191919", "table_row_focus_dark": "#ECF2F7", "text_lg": "16px", "text_md": "14px", "text_sm": "12px", "text_xl": "22px", "text_xs": "10px", "text_xxl": "26px", "text_xxs": "9px"}, "version": "1.2.2"}