ivykopal committed on
Commit
7a1ad01
·
verified ·
1 Parent(s): 9c3225c

Upload 5 files

Files changed (5)
  1. README.md +7 -8
  2. app.py +373 -0
  3. dumpy.py +52 -0
  4. gitattributes.txt +35 -0
  5. requirements.txt +72 -0
README.md CHANGED
@@ -1,12 +1,11 @@
  ---
- title: Dashboard
- emoji: 🐒
- colorFrom: pink
- colorTo: pink
+ title: Template for Dashboards - Multilingual Prompt Evaluation Project
+ emoji: 📊
+ colorFrom: indigo
+ colorTo: indigo
  sdk: gradio
- sdk_version: 4.25.0
+ sdk_version: 4.21.0
  app_file: app.py
  pinned: false
+ license: apache-2.0
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,373 @@
+ from apscheduler.schedulers.background import BackgroundScheduler
+ import datetime
+ import os
+ from typing import Dict, Tuple
+ from uuid import UUID
+
+ import altair as alt
+ import argilla as rg
+ from argilla.feedback import FeedbackDataset
+ from argilla.client.feedback.dataset.remote.dataset import RemoteFeedbackDataset
+ import gradio as gr
+ import pandas as pd
+
+ """
+ This is the main file for the dashboard application. It contains the main function and the functions that obtain the data and create the charts.
+ It is designed as a template for recreating the dashboard of the prompt translation project for any language.
+
+ To create a new dashboard, you need several environment variables that you can easily set in the Hugging Face Space hosting the dashboard:
+
+ - SOURCE_DATASET: The dataset id of the source dataset
+ - SOURCE_WORKSPACE: The workspace id of the source dataset
+ - TARGET_RECORDS: The number of records that you have as a target to annotate. We usually set this to 500.
+ - ARGILLA_API_URL: Link to the Hugging Face Space where the annotation effort is being hosted. For example, the Spanish one is https://somosnlp-dibt-prompt-translation-for-es.hf.space/
+ - ARGILLA_API_KEY: The API key to access the Hugging Face Space. Please store it as a secret in the Hugging Face Space configuration.
+ """
+
+ # Slovak translations of legends and titles (English glosses in comments)
+ ANNOTATED = 'Anotované'  # 'Annotated'
+ NUMBER_ANNOTATED = 'Celkový počet anotácií'  # 'Total number of annotations'
+ PENDING = 'Zostáva anotovať'  # 'Remaining to annotate'
+
+ NUMBER_ANNOTATORS = "Počet anotátorov"  # 'Number of annotators'
+ NAME = 'Užívateľ'  # 'User'
+ NUMBER_ANNOTATIONS = 'Počet anotácií'  # 'Number of annotations'
+
+ CATEGORY = 'Kategória'  # 'Category'
+
+
+ def obtain_source_target_datasets() -> (
+     Tuple[
+         FeedbackDataset | RemoteFeedbackDataset, FeedbackDataset | RemoteFeedbackDataset
+     ]
+ ):
+     """
+     This function returns the source and target datasets to be used in the application.
+
+     Returns:
+         A tuple with the source and target datasets. The source dataset is filtered by the response status 'pending'.
+     """
+
+     # Obtain the public dataset and see how many pending records there are
+     source_dataset = rg.FeedbackDataset.from_argilla(
+         os.getenv("SOURCE_DATASET"), workspace=os.getenv("SOURCE_WORKSPACE")
+     )
+     filtered_source_dataset = source_dataset.filter_by(
+         response_status=["pending"])
+
+     # Obtain a list of users from the private workspace
+     # target_dataset = rg.FeedbackDataset.from_argilla(
+     #     os.getenv("RESULTS_DATASET"), workspace=os.getenv("RESULTS_WORKSPACE")
+     # )
+
+     target_dataset = source_dataset.filter_by(response_status=["submitted"])
+
+     return filtered_source_dataset, target_dataset
+
+
+ def get_user_annotations_dictionary(
+     dataset: FeedbackDataset | RemoteFeedbackDataset,
+ ) -> Dict[str, int]:
+     """
+     This function returns a dictionary with the username as the key and the number of annotations as the value.
+
+     Args:
+         dataset: The dataset to be analyzed.
+     Returns:
+         A dictionary with the username as the key and the number of annotations as the value.
+     """
+     output = {}
+     for record in dataset:
+         for response in record.responses:
+             if str(response.user_id) not in output.keys():
+                 output[str(response.user_id)] = 1
+             else:
+                 output[str(response.user_id)] += 1
+
+     # Change the keys from user ids to usernames
+     for key in list(output.keys()):
+         output[rg.User.from_id(UUID(key)).username] = output.pop(key)
+
+     return output
+
+
+ def donut_chart_total() -> alt.Chart:
+     """
+     This function returns a donut chart with the progress of the total annotations.
+     It counts each record that has been annotated at least once.
+
+     Returns:
+         An Altair chart with the donut chart.
+     """
+
+     # Count annotated and pending records
+     annotated_records = len(target_dataset)
+     pending_records = int(os.getenv("TARGET_RECORDS")) - annotated_records
+
+     # Prepare data for the donut chart
+     source = pd.DataFrame(
+         {
+             "values": [annotated_records, pending_records],
+             "category": [ANNOTATED, PENDING],
+             # Blue for completed, orange for remaining
+             "colors": ["#4682b4", "#e68c39"],
+         }
+     )
+
+     domain = source['category'].tolist()
+     range_ = source['colors'].tolist()
+
+     base = alt.Chart(source).encode(
+         theta=alt.Theta("values:Q", stack=True),
+         radius=alt.Radius(
+             "values", scale=alt.Scale(type="sqrt", zero=True, rangeMin=20)
+         ),
+         color=alt.Color(field="category", type="nominal", scale=alt.Scale(
+             domain=domain, range=range_), legend=alt.Legend(title=CATEGORY)),
+     )
+
+     c1 = base.mark_arc(innerRadius=20, stroke="#fff")
+
+     c2 = base.mark_text(radiusOffset=20).encode(text="values:Q")
+
+     chart = c1 + c2
+
+     return chart
+
+
+ def kpi_chart_remaining() -> alt.Chart:
+     """
+     This function returns a KPI chart with the number of records remaining to be annotated.
+
+     Returns:
+         An Altair chart with the KPI chart.
+     """
+
+     pending_records = int(os.getenv("TARGET_RECORDS")) - len(target_dataset)
+     # Build a one-row DataFrame holding the KPI value
+     data = pd.DataFrame({"Category": [PENDING], "Value": [pending_records]})
+
+     # Create the Altair chart
+     chart = (
+         alt.Chart(data)
+         .mark_text(fontSize=100, align="center", baseline="middle", color="#e68b39")
+         .encode(text="Value:N")
+         .properties(title=PENDING, width=250, height=200)
+     )
+
+     return chart
+
+
+ def kpi_chart_submitted() -> alt.Chart:
+     """
+     This function returns a KPI chart with the total number of records that have been annotated.
+
+     Returns:
+         An Altair chart with the KPI chart.
+     """
+
+     total = len(target_dataset)
+
+     # Build a one-row DataFrame holding the KPI value
+     data = pd.DataFrame({"Category": [NUMBER_ANNOTATED], "Value": [total]})
+
+     # Create the Altair chart
+     chart = (
+         alt.Chart(data)
+         .mark_text(fontSize=100, align="center", baseline="middle", color="steelblue")
+         .encode(text="Value:N")
+         .properties(title=NUMBER_ANNOTATED, width=250, height=200)
+     )
+
+     return chart
+
+
+ def kpi_chart_total_annotators() -> alt.Chart:
+     """
+     This function returns a KPI chart with the total number of annotators.
+
+     Returns:
+         An Altair chart with the KPI chart.
+     """
+
+     # Obtain the total number of annotators
+     total_annotators = len(user_ids_annotations)
+
+     # Build a one-row DataFrame holding the KPI value
+     data = pd.DataFrame(
+         {"Category": [NUMBER_ANNOTATORS], "Value": [total_annotators]}
+     )
+
+     # Create the Altair chart
+     chart = (
+         alt.Chart(data)
+         .mark_text(fontSize=100, align="center", baseline="middle", color="steelblue")
+         .encode(text="Value:N")
+         .properties(title=NUMBER_ANNOTATORS, width=250, height=200)
+     )
+
+     return chart
+
+
+ def render_hub_user_link(hub_id: str) -> str:
+     """
+     This function returns a link to the user's profile on Hugging Face.
+
+     Args:
+         hub_id: The user's id on Hugging Face.
+
+     Returns:
+         A string with the link to the user's profile on Hugging Face.
+     """
+     link = f"https://huggingface.co/{hub_id}"
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{hub_id}</a>'
+
+
+ def obtain_top_users(user_ids_annotations: Dict[str, int], N: int = 50) -> pd.DataFrame:
+     """
+     This function returns the top N users with the most annotations.
+
+     Args:
+         user_ids_annotations: A dictionary with the user ids as the keys and the number of annotations as the values.
+         N: The number of users to return, 50 by default.
+
+     Returns:
+         A pandas DataFrame with the top N users with the most annotations.
+     """
+
+     dataframe = pd.DataFrame(
+         user_ids_annotations.items(), columns=[NAME, NUMBER_ANNOTATIONS]
+     )
+     dataframe[NAME] = dataframe[NAME].apply(render_hub_user_link)
+     dataframe = dataframe.sort_values(by=NUMBER_ANNOTATIONS, ascending=False)
+     return dataframe.head(N)
+
+
+ def fetch_data() -> None:
+     """
+     This function fetches the data from the source and target datasets and updates the global variables.
+     """
+
+     print(f"Starting to fetch data: {datetime.datetime.now()}")
+
+     global source_dataset, target_dataset, user_ids_annotations, annotated, remaining, percentage_completed, top_dataframe
+     source_dataset, target_dataset = obtain_source_target_datasets()
+     user_ids_annotations = get_user_annotations_dictionary(target_dataset)
+
+     annotated = len(target_dataset)
+     remaining = int(os.getenv("TARGET_RECORDS")) - annotated
+     percentage_completed = round(
+         (annotated / int(os.getenv("TARGET_RECORDS"))) * 100, 1
+     )
+
+     # Print the current date and time
+     print(f"Data fetched: {datetime.datetime.now()}")
+
+
+ def get_top(N=50) -> pd.DataFrame:
+     """
+     This function returns the top N users with the most annotations.
+
+     Args:
+         N: The number of users to return, 50 by default.
+
+     Returns:
+         A pandas DataFrame with the top N users with the most annotations.
+     """
+
+     return obtain_top_users(user_ids_annotations, N=N)
+
+
+ def main() -> None:
+
+     # Connect to the Argilla Space with rg.init()
+     rg.init(
+         api_url=os.getenv("ARGILLA_API_URL"),
+         api_key=os.getenv("ARGILLA_API_KEY"),
+     )
+
+     # Fetch the data initially
+     fetch_data()
+
+     # Hide the orange border that Gradio draws around elements that are constantly loading
+     css = """
+     .generating {
+         border: none;
+     }
+     """
+
+     with gr.Blocks(css=css) as demo:
+         gr.Markdown(
+             """
+             # 🌍 Slovak - Multilingual Prompt Evaluation Project
+
+             Hugging Face a @argilla vyvíjajú projekt [Multilingual Prompt Evaluation Project](https://github.com/huggingface/data-is-better-together/tree/main/prompt_translation). Ide o verejný multilinguálny benchmark pre vyhodnocovanie jazykových modelov.
+
+             ## Cieľom je preložiť 500 promptov
+             K evaluácii však potrebujeme dáta! Komunita vybrala 500 najlepších promptov, ktoré budú tvoriť benchmark, ktoré sú ale v angličtine.
+             **Preto potrebujeme vašu pomoc**: ak preložíme všetkých 500 promptov, môžeme pridať slovenčinu do evaluácie jazykových modelov.
+
+             ## Ako sa zapojiť
+             Navštívte [anotačné prostredie](https://dibt-slovak-prompt-translation-for-slovak.hf.space/), prihláste sa pomocou existujúceho Hugging Face účtu alebo si môžete vytvoriť nové Hugging Face konto. Po prihlásení môžete začať prekladať.
+             Vďaka za vašu pomoc! Aby sme vám trochu pomohli, využili sme SeamlessM4T model pre vytvorenie návrhov v slovenčine. Tieto návrhy je možné schváliť, upraviť alebo prepísať celé nanovo.
+             """
+         )
+
+         gr.Markdown(
+             f"""
+             ## 🚀 Aktuálny progres
+             """
+         )
+         with gr.Row():
+
+             kpi_submitted_plot = gr.Plot(label="Plot")
+             demo.load(
+                 kpi_chart_submitted,
+                 inputs=[],
+                 outputs=[kpi_submitted_plot],
+             )
+
+             kpi_remaining_plot = gr.Plot(label="Plot")
+             demo.load(
+                 kpi_chart_remaining,
+                 inputs=[],
+                 outputs=[kpi_remaining_plot],
+             )
+
+             donut_total_plot = gr.Plot(label="Plot")
+             demo.load(
+                 donut_chart_total,
+                 inputs=[],
+                 outputs=[donut_total_plot],
+             )
+
+         gr.Markdown(
+             """
+             ## 👾 Sieň slávy
+             Tu môžete vidieť najaktívnejších prispievateľov a počet anotácií, ktoré urobili.
+             """
+         )
+
+         with gr.Row():
+
+             kpi_hall_plot = gr.Plot(label="Plot")
+             demo.load(
+                 kpi_chart_total_annotators, inputs=[], outputs=[kpi_hall_plot]
+             )
+
+             top_df_plot = gr.Dataframe(
+                 headers=[NAME, NUMBER_ANNOTATIONS],
+                 datatype=[
+                     "markdown",
+                     "number",
+                 ],
+                 row_count=50,
+                 col_count=(2, "fixed"),
+                 interactive=False,
+             )
+             demo.load(get_top, None, [top_df_plot])
+
+     # Launch the Gradio interface
+     demo.launch()
+
+
+ if __name__ == "__main__":
+     main()
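
Note: app.py imports `BackgroundScheduler` from `apscheduler` (and requirements.txt pins `apscheduler==3.10.4`), but this upload never instantiates it; `fetch_data()` runs only once at startup, so the dashboard shows a snapshot until the Space restarts. If a periodic refresh is intended, a minimal sketch would look like the following; the 15-minute interval and the placement at the top of `main()` are assumptions, not values taken from this commit:

```python
from apscheduler.schedulers.background import BackgroundScheduler

# Sketch only: re-run fetch_data() in a background thread so the global
# datasets and counters stay fresh while Gradio keeps serving requests.
# The 15-minute interval is an assumed value, not from this upload.
scheduler = BackgroundScheduler()
scheduler.add_job(fetch_data, "interval", minutes=15)
scheduler.start()
```

Started before `demo.launch()`, the scheduler thread lives for the lifetime of the process, and each run rebinds the globals that the chart functions read on every page load.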
dumpy.py ADDED
@@ -0,0 +1,52 @@
+ import json
+ import logging
+ import os
+
+ import argilla as rg
+ from huggingface_hub import HfApi
+
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
+
+ if __name__ == "__main__":
+     logger.info("*** Initializing Argilla session ***")
+     rg.init(
+         api_url=os.getenv("ARGILLA_API_URL"),
+         api_key=os.getenv("ARGILLA_API_KEY"),
+         extra_headers={"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"},
+     )
+
+     logger.info("*** Fetching dataset from Argilla ***")
+     dataset = rg.FeedbackDataset.from_argilla(
+         os.getenv("SOURCE_DATASET"),
+         workspace=os.getenv("SOURCE_WORKSPACE"),
+     )
+     logger.info("*** Filtering records by `response_status` ***")
+     dataset = dataset.filter_by(response_status=["submitted"])  # type: ignore
+
+     logger.info("*** Calculating users and annotation count ***")
+     output = {}
+     for record in dataset.records:
+         for response in record.responses:
+             if response.user_id not in output:
+                 output[response.user_id] = 0
+             output[response.user_id] += 1
+
+     # Change the keys from user ids to usernames
+     for key in list(output.keys()):
+         output[rg.User.from_id(key).username] = output.pop(key)
+
+     logger.info("*** Users and annotation count successfully calculated! ***")
+
+     logger.info("*** Dumping Python dict into `stats.json` ***")
+     with open("stats.json", "w") as file:
+         json.dump(output, file, indent=4)
+
+     logger.info("*** Uploading `stats.json` to Hugging Face Hub ***")
+     api = HfApi(token=os.getenv("HF_TOKEN"))
+     api.upload_file(
+         path_or_fileobj="stats.json",
+         path_in_repo="stats.json",
+         repo_id="DIBT/prompt-collective-dashboard",
+         repo_type="space",
+     )
+     logger.info("*** `stats.json` successfully uploaded to Hugging Face Hub! ***")
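
For reference, dumpy.py writes a flat JSON object mapping each annotator's username to their annotation count. A small consumer sketch is shown below; the file name matches the script, while the usernames and counts in the comment are invented placeholders:

```python
import json

# stats.json has the shape {"<username>": <annotation count>, ...},
# e.g. {"alice": 120, "bob": 87} (placeholder values, not real data)
with open("stats.json") as file:
    stats = json.load(file)

# List annotators by contribution, mirroring the dashboard's hall of fame
for username, count in sorted(stats.items(), key=lambda item: item[1], reverse=True):
    print(f"{username}: {count}")
```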
gitattributes.txt ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,72 @@
+ aiofiles==23.2.1
+ altair==5.2.0
+ annotated-types==0.6.0
+ anyio==4.2.0
+ apscheduler==3.10.4
+ argilla==1.23.0
+ attrs==23.2.0
+ backoff==2.2.1
+ certifi==2024.2.2
+ charset-normalizer==3.3.2
+ click==8.1.7
+ colorama==0.4.6
+ contourpy==1.2.0
+ cycler==0.12.1
+ Deprecated==1.2.14
+ exceptiongroup==1.2.0
+ fastapi==0.109.2
+ ffmpy==0.3.1
+ filelock==3.13.1
+ fonttools==4.48.1
+ fsspec==2024.2.0
+ gradio==4.17.0
+ gradio_client==0.9.0
+ h11==0.14.0
+ httpcore==1.0.2
+ httpx==0.26.0
+ huggingface-hub==0.20.3
+ idna==3.6
+ importlib-resources==6.1.1
+ Jinja2==3.1.3
+ jsonschema==4.21.1
+ jsonschema-specifications==2023.12.1
+ kiwisolver==1.4.5
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ matplotlib==3.8.2
+ mdurl==0.1.2
+ monotonic==1.6
+ numpy==1.23.5
+ orjson==3.9.13
+ packaging==23.2
+ pandas==1.5.3
+ pillow==10.2.0
+ pydantic==2.6.1
+ pydantic_core==2.16.2
+ pydub==0.25.1
+ Pygments==2.17.2
+ pyparsing==3.1.1
+ python-dateutil==2.8.2
+ python-multipart==0.0.7
+ pytz==2024.1
+ PyYAML==6.0.1
+ referencing==0.33.0
+ requests==2.31.0
+ rich==13.7.0
+ rpds-py==0.17.1
+ ruff==0.2.1
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.36.3
+ tomlkit==0.12.0
+ toolz==0.12.1
+ tqdm==4.66.1
+ typer==0.9.0
+ typing_extensions==4.9.0
+ urllib3==2.2.0
+ uvicorn==0.27.0.post1
+ vega-datasets==0.9.0
+ websockets==11.0.3
+ wrapt==1.14.1