arthrod committed on
Commit
5912687
·
verified ·
1 Parent(s): 55fbdf9

Update app.py

Files changed (1)
  1. app.py +1716 -350
app.py CHANGED
@@ -1,469 +1,1835 @@
1
  import marimo
2
 
3
- __generated_with = "0.9.2"
4
- app = marimo.App()
5
 
6
 
7
  @app.cell
8
- def __():
9
- import marimo as mo
10
-
11
- mo.md("# Welcome to marimo! 🌊🍃")
12
- return (mo,)
 
 
13
 
14
 
15
  @app.cell
16
- def __(mo):
17
- slider = mo.ui.slider(1, 22)
18
- return (slider,)
19
 
20
 
21
  @app.cell
22
- def __(mo, slider):
23
- mo.md(
24
- f"""
25
- marimo is a **reactive** Python notebook.
26
 
27
- This means that unlike traditional notebooks, marimo notebooks **run
28
- automatically** when you modify them or
29
- interact with UI elements, like this slider: {slider}.
30
 
31
- {"##" + "🍃" * slider.value}
32
- """
33
  )
34
  return
35
 
36
 
37
- @app.cell(hide_code=True)
38
- def __(mo):
39
- mo.accordion(
40
- {
41
- "Tip: disabling automatic execution": mo.md(
42
- rf"""
43
- marimo lets you disable automatic execution: just go into the
44
- notebook settings and set
45
-
46
- "Runtime > On Cell Change" to "lazy".
47
-
48
- When the runtime is lazy, after running a cell, marimo marks its
49
- descendants as stale instead of automatically running them. The
50
- lazy runtime puts you in control over when cells are run, while
51
- still giving guarantees about the notebook state.
52
- """
53
- )
54
- }
55
  )
56
  return
57
 
58
 
59
- @app.cell(hide_code=True)
60
- def __(mo):
61
- mo.md(
62
- """
63
- Tip: This is a tutorial notebook. You can create your own notebooks
64
- by entering `marimo edit` at the command line.
65
- """
66
- ).callout()
67
  return
68
 
69
 
70
- @app.cell(hide_code=True)
71
- def __(mo):
72
- mo.md(
73
- """
74
- ## 1. Reactive execution
75
 
76
- A marimo notebook is made up of small blocks of Python code called
77
- cells.
78
 
79
- marimo reads your cells and models the dependencies among them: whenever
80
- a cell that defines a global variable is run, marimo
81
- **automatically runs** all cells that reference that variable.
 
 
82
 
83
- Reactivity keeps your program state and outputs in sync with your code,
84
- making for a dynamic programming environment that prevents bugs before they
85
- happen.
86
- """
87
- )
88
  return
89
 
90
 
91
- @app.cell(hide_code=True)
92
- def __(changed, mo):
93
- (
94
- mo.md(
95
- f"""
96
- **✨ Nice!** The value of `changed` is now {changed}.
97
 
98
- When you updated the value of the variable `changed`, marimo
99
- **reacted** by running this cell automatically, because this cell
100
- references the global variable `changed`.
101
 
102
- Reactivity ensures that your notebook state is always
103
- consistent, which is crucial for doing good science; it's also what
104
- enables marimo notebooks to double as tools and apps.
105
- """
106
- )
107
- if changed
108
- else mo.md(
109
- """
110
- **🌊 See it in action.** In the next cell, change the value of the
111
- variable `changed` to `True`, then click the run button.
112
- """
113
- )
114
- )
115
  return
116
 
117
 
118
  @app.cell
119
- def __():
120
- changed = False
121
- return (changed,)
 
122
 
123
 
124
- @app.cell(hide_code=True)
125
- def __(mo):
126
- mo.accordion(
127
- {
128
- "Tip: execution order": (
129
- """
130
- The order of cells on the page has no bearing on
131
- the order in which cells are executed: marimo knows that a cell
132
- reading a variable must run after the cell that defines it. This
133
- frees you to organize your code in the way that makes the most
134
- sense for you.
135
- """
136
  )
137
- }
138
- )
139
  return
140
 
141
 
142
- @app.cell(hide_code=True)
143
- def __(mo):
144
- mo.md(
145
- """
146
- **Global names must be unique.** To enable reactivity, marimo imposes a
147
- constraint on how names appear in cells: no two cells may define the same
148
- variable.
149
- """
150
- )
151
  return
152
 
153
 
154
- @app.cell(hide_code=True)
155
- def __(mo):
156
- mo.accordion(
157
- {
158
- "Tip: encapsulation": (
159
- """
160
- By encapsulating logic in functions, classes, or Python modules,
161
- you can minimize the number of global variables in your notebook.
162
- """
163
- )
164
- }
165
- )
166
  return
167
 
168
 
169
- @app.cell(hide_code=True)
170
- def __(mo):
171
- mo.accordion(
172
- {
173
- "Tip: private variables": (
174
- """
175
- Variables prefixed with an underscore are "private" to a cell, so
176
- they can be defined by multiple cells.
177
- """
178
- )
 
 
179
  }
180
- )
181
- return
182
 
183
 
184
- @app.cell(hide_code=True)
185
- def __(mo):
186
- mo.md(
187
  """
188
- ## 2. UI elements
189
 
190
- Cells can output interactive UI elements. Interacting with a UI
191
- element **automatically triggers notebook execution**: when
192
- you interact with a UI element, its value is sent back to Python, and
193
- every cell that references that element is re-run.
194
 
195
- marimo provides a library of UI elements to choose from under
196
- `marimo.ui`.
197
  """
198
- )
199
- return
200
 
201
 
202
  @app.cell
203
- def __(mo):
204
- mo.md("""**🌊 Some UI elements.** Try interacting with the below elements.""")
205
  return
206
 
207
 
208
  @app.cell
209
- def __(mo):
210
- icon = mo.ui.dropdown(["🍃", "🌊", "✨"], value="🍃")
211
- return (icon,)
212
 
213
 
214
  @app.cell
215
- def __(icon, mo):
216
- repetitions = mo.ui.slider(1, 16, label=f"number of {icon.value}: ")
217
- return (repetitions,)
218
 
219
 
220
  @app.cell
221
- def __(icon, repetitions):
222
- icon, repetitions
223
  return
224
 
225
 
226
  @app.cell
227
- def __(icon, mo, repetitions):
228
- mo.md("# " + icon.value * repetitions.value)
 
229
  return
230
 
231
 
232
- @app.cell(hide_code=True)
233
- def __(mo):
234
- mo.md(
235
- """
236
- ## 3. marimo is just Python
237
 
238
- marimo cells parse Python (and only Python), and marimo notebooks are
239
- stored as pure Python files β€” outputs are _not_ included. There's no
240
- magical syntax.
241
 
242
- The Python files generated by marimo are:
 
 
 
 
243
 
244
- - easily versioned with git, yielding minimal diffs
245
- - legible for both humans and machines
246
- - formattable using your tool of choice,
247
- - usable as Python scripts, with UI elements taking their default
248
- values, and
249
- - importable by other modules (more on that in the future).
250
- """
251
- )
252
  return
253
 
254
 
255
- @app.cell(hide_code=True)
256
- def __(mo):
257
- mo.md(
258
- """
259
- ## 4. Running notebooks as apps
260
 
261
- marimo notebooks can double as apps. Click the app window icon in the
262
- bottom-right to see this notebook in "app view."
263
 
264
- Serve a notebook as an app with `marimo run` at the command-line.
265
- Of course, you can use marimo just to level-up your
266
- notebooking, without ever making apps.
267
- """
268
  )
269
  return
270
 
271
 
272
- @app.cell(hide_code=True)
273
- def __(mo):
274
- mo.md(
275
- """
276
- ## 5. The `marimo` command-line tool
277
 
278
- **Creating and editing notebooks.** Use
279
 
280
- ```
281
- marimo edit
282
- ```
283
 
284
- in a terminal to start the marimo notebook server. From here
285
- you can create a new notebook or edit existing ones.
286
 
287
 
288
- **Running as apps.** Use
 
289
 
290
- ```
291
- marimo run notebook.py
292
- ```
293
 
294
- to start a webserver that serves your notebook as an app in read-only mode,
295
- with code cells hidden.
296
 
297
- **Convert a Jupyter notebook.** Convert a Jupyter notebook to a marimo
298
- notebook using `marimo convert`:
299
 
300
- ```
301
- marimo convert your_notebook.ipynb > your_app.py
302
- ```
303
 
304
- **Tutorials.** marimo comes packaged with tutorials:
 
 
 
 
305
 
306
- - `dataflow`: more on marimo's automatic execution
307
- - `ui`: how to use UI elements
308
- - `markdown`: how to write markdown, with interpolated values and
309
- LaTeX
310
- - `plots`: how plotting works in marimo
311
- - `sql`: how to use SQL
312
- - `layout`: layout elements in marimo
313
- - `fileformat`: how marimo's file format works
314
- - `markdown-format`: for using `.md` files in marimo
315
- - `for-jupyter-users`: if you are coming from Jupyter
316
 
317
- Start a tutorial with `marimo tutorial`; for example,
318
 
319
- ```
320
- marimo tutorial dataflow
321
- ```
322
 
323
- In addition to tutorials, we have examples in our
324
- [our GitHub repo](https://www.github.com/marimo-team/marimo/tree/main/examples).
325
- """
326
- )
327
- return
328
 
 
 
 
329
 
330
- @app.cell(hide_code=True)
331
- def __(mo):
332
- mo.md(
333
- """
334
- ## 6. The marimo editor
335
 
336
- Here are some tips to help you get started with the marimo editor.
337
- """
338
- )
339
  return
340
 
341
 
342
  @app.cell
343
- def __(mo, tips):
344
- mo.accordion(tips)
345
- return
346
 
347
 
348
- @app.cell(hide_code=True)
349
- def __(mo):
350
- mo.md("""## Finally, a fun fact""")
351
  return
352
 
353
 
354
- @app.cell(hide_code=True)
355
- def __(mo):
356
- mo.md(
357
- """
358
- The name "marimo" is a reference to a type of algae that, under
359
- the right conditions, clumps together to form a small sphere
360
- called a "marimo moss ball". Made of just strands of algae, these
361
- beloved assemblages are greater than the sum of their parts.
362
- """
363
- )
364
  return
365
 
366
 
367
- @app.cell(hide_code=True)
368
- def __():
369
- tips = {
370
- "Saving": (
371
- """
372
- **Saving**
373
-
374
- - _Name_ your app using the box at the top of the screen, or
375
- with `Ctrl/Cmd+s`. You can also create a named app at the
376
- command line, e.g., `marimo edit app_name.py`.
377
-
378
- - _Save_ by clicking the save icon on the bottom right, or by
379
- inputting `Ctrl/Cmd+s`. By default marimo is configured
380
- to autosave.
381
- """
382
- ),
383
- "Running": (
384
- """
385
- 1. _Run a cell_ by clicking the play ( ▷ ) button on the top
386
- right of a cell, or by inputting `Ctrl/Cmd+Enter`.
387
-
388
- 2. _Run a stale cell_ by clicking the yellow run button on the
389
- right of the cell, or by inputting `Ctrl/Cmd+Enter`. A cell is
390
- stale when its code has been modified but not run.
391
-
392
- 3. _Run all stale cells_ by clicking the play ( ▷ ) button on
393
- the bottom right of the screen, or input `Ctrl/Cmd+Shift+r`.
394
- """
395
- ),
396
- "Console Output": (
397
- """
398
- Console output (e.g., `print()` statements) is shown below a
399
- cell.
400
- """
401
- ),
402
- "Creating, Moving, and Deleting Cells": (
403
- """
404
- 1. _Create_ a new cell above or below a given one by clicking
405
- the plus button to the left of the cell, which appears on
406
- mouse hover.
407
-
408
- 2. _Move_ a cell up or down by dragging on the handle to the
409
- right of the cell, which appears on mouse hover.
410
-
411
- 3. _Delete_ a cell by clicking the trash bin icon. Bring it
412
- back by clicking the undo button on the bottom right of the
413
- screen, or with `Ctrl/Cmd+Shift+z`.
414
- """
415
- ),
416
- "Disabling Automatic Execution": (
417
- """
418
- Via the notebook settings (gear icon) or footer panel, you
419
- can disable automatic execution. This is helpful when
420
- working with expensive notebooks or notebooks that have
421
- side-effects like database transactions.
422
- """
423
- ),
424
- "Disabling Cells": (
425
- """
426
- You can disable a cell via the cell context menu.
427
- marimo will never run a disabled cell or any cells that depend on it.
428
- This can help prevent accidental execution of expensive computations
429
- when editing a notebook.
430
- """
431
- ),
432
- "Code Folding": (
433
- """
434
- You can collapse or fold the code in a cell by clicking the arrow
435
- icons in the line number column to the left, or by using keyboard
436
- shortcuts.
437
-
438
- Use the command palette (`Ctrl/Cmd+k`) or a keyboard shortcut to
439
- quickly fold or unfold all cells.
440
- """
441
- ),
442
- "Code Formatting": (
443
- """
444
- If you have [ruff](https://github.com/astral-sh/ruff) installed,
445
- you can format a cell with the keyboard shortcut `Ctrl/Cmd+b`.
446
- """
447
- ),
448
- "Command Palette": (
449
- """
450
- Use `Ctrl/Cmd+k` to open the command palette.
451
- """
452
- ),
453
- "Keyboard Shortcuts": (
454
- """
455
- Open the notebook menu (top-right) or input `Ctrl/Cmd+Shift+h` to
456
- view a list of all keyboard shortcuts.
457
- """
458
- ),
459
- "Configuration": (
460
- """
461
- Configure the editor by clicking the gears icon near the top-right
462
- of the screen.
463
- """
464
- ),
465
- }
466
- return (tips,)
467
 
468
 
469
  if __name__ == "__main__":
 
1
  import marimo
2
 
3
+ __generated_with = "0.11.26"
4
+ app = marimo.App(width="full")
5
 
6
 
7
  @app.cell
8
+ def _():
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import seaborn as sns
13
+ import altair as alt
14
+ return alt, np, pd, plt, sns
15
 
16
 
17
  @app.cell
18
+ def _(platforms_data):
19
+ # Complete the platform data with GC AI, Notebook LM, and Vecflow
20
+ platforms_data.update(
21
+ {
22
+ 'GC AI': {
23
+ 'metrics': [
24
+ {'metric': 'Pass Rate (Arthur)', 'value': 60},
25
+ {'metric': 'Pass Rate (Anna)', 'value': 40},
26
+ {'metric': 'Helpfulness (Arthur)', 'value': 1.4 * 50},
27
+ {'metric': 'Helpfulness (Anna)', 'value': 0.5 * 50},
28
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.8 * 50},
29
+ {'metric': 'Adequate Length (Anna)', 'value': 1.0 * 50},
30
+ ],
31
+ 'performance': [
32
+ {'task': 'Task #6', 'arthur': 6, 'anna': 0},
33
+ {'task': 'Task #13', 'arthur': 0, 'anna': 0},
34
+ {'task': 'Task #18', 'arthur': 6, 'anna': 6},
35
+ {'task': 'Task #19', 'arthur': 0, 'anna': 0},
36
+ {'task': 'Task #20', 'arthur': 6, 'anna': 0},
37
+ ],
38
+ 'strengths': [
39
+ 'Good adequate length rating from Arthur',
40
+ 'Decent pass rate from Arthur (60%)',
41
+ 'Solid helpfulness score from Arthur',
42
+ ],
43
+ 'weaknesses': [
44
+ 'Lowest helpfulness rating from Anna (0.5/2.0)',
45
+ 'Largest discrepancy between evaluators',
46
+ 'Lower pass rate from Anna (40%)',
47
+ ],
48
+ },
49
+ 'Notebook LM': {
50
+ 'metrics': [
51
+ {'metric': 'Pass Rate (Arthur)', 'value': 60},
52
+ {'metric': 'Pass Rate (Anna)', 'value': 60},
53
+ {'metric': 'Helpfulness (Arthur)', 'value': 0.8 * 50},
54
+ {'metric': 'Helpfulness (Anna)', 'value': 1.2 * 50},
55
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.6 * 50},
56
+ {'metric': 'Adequate Length (Anna)', 'value': 2.0 * 50},
57
+ ],
58
+ 'performance': [
59
+ {'task': 'Task #3', 'arthur': 6, 'anna': 0},
60
+ {'task': 'Task #6', 'arthur': 0, 'anna': 0},
61
+ {'task': 'Task #11', 'arthur': 0, 'anna': 6},
62
+ {'task': 'Task #13', 'arthur': 6, 'anna': 6},
63
+ {'task': 'Task #15', 'arthur': 6, 'anna': 6},
64
+ {'task': 'Task #19', 'arthur': 6, 'anna': 6},
65
+ ],
66
+ 'strengths': [
67
+ 'Perfect agreement between Arthur and Anna on pass/fail',
68
+ 'Highest adequate length rating from Anna (2.0/2.0)',
69
+ 'Consistent pass rate between evaluators (60%)',
70
+ ],
71
+ 'weaknesses': [
72
+ 'Lower helpfulness rating from Arthur (0.8/2.0)',
73
+ 'Mixed performance in specific tasks',
74
+ ],
75
+ },
76
+ 'Vecflow': {
77
+ 'metrics': [
78
+ {'metric': 'Pass Rate (Arthur)', 'value': 60},
79
+ {'metric': 'Pass Rate (Anna)', 'value': 40},
80
+ {'metric': 'Helpfulness (Arthur)', 'value': 0.6 * 50},
81
+ {'metric': 'Helpfulness (Anna)', 'value': 0.6 * 50},
82
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.8 * 50},
83
+ {'metric': 'Adequate Length (Anna)', 'value': 1.4 * 50},
84
+ ],
85
+ 'performance': [
86
+ {'task': 'Task #11', 'arthur': 0, 'anna': 6},
87
+ {'task': 'Task #13', 'arthur': 6, 'anna': 0},
88
+ {'task': 'Task #15', 'arthur': 6, 'anna': 0},
89
+ {'task': 'Task #18', 'arthur': 6, 'anna': 6},
90
+ {'task': 'Task #19', 'arthur': 0, 'anna': 0},
91
+ ],
92
+ 'strengths': [
93
+ 'Perfect agreement on helpfulness between evaluators',
94
+ 'Strong adequate length scores from both evaluators',
95
+ 'Good performance in specialized tasks',
96
+ ],
97
+ 'weaknesses': [
98
+ 'Lowest helpfulness rating overall (0.6/2.0)',
99
+ 'Lower pass rate from Anna (40%)',
100
+ 'Inconsistent evaluation on complex tasks',
101
+ ],
102
+ },
103
+ }
104
+ )
105
+ return
106
 
107
 
108
  @app.cell
109
+ def _():
110
+ # Platform data
111
+ platforms_data = {
112
+ 'Chat GPT': {
113
+ 'metrics': [
114
+ {'metric': 'Pass Rate (Arthur)', 'value': 100},
115
+ {'metric': 'Pass Rate (Anna)', 'value': 40},
116
+ {'metric': 'Helpfulness (Arthur)', 'value': 1.5 * 50}, # Scaling to 0-100
117
+ {'metric': 'Helpfulness (Anna)', 'value': 1.25 * 50},
118
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.75 * 50},
119
+ {'metric': 'Adequate Length (Anna)', 'value': 1.25 * 50},
120
+ ],
121
+ 'performance': [{'task': 'Task #1', 'arthur': 6, 'anna': 0}, {'task': 'Task #3', 'arthur': 6, 'anna': 0}],
122
+ 'strengths': [
123
+ 'High pass rate from Arthur (100%)',
124
+ 'Strong helpfulness ratings from both evaluators',
125
+ 'Good adequate length scores',
126
+ ],
127
+ 'weaknesses': ['Lower pass rate from Anna (40%)', 'Inconsistent evaluation between Arthur and Anna'],
128
+ },
129
+ 'CoPilot': {
130
+ 'metrics': [
131
+ {'metric': 'Pass Rate (Arthur)', 'value': 40},
132
+ {'metric': 'Pass Rate (Anna)', 'value': 60},
133
+ {'metric': 'Helpfulness (Arthur)', 'value': 1.0 * 50},
134
+ {'metric': 'Helpfulness (Anna)', 'value': 1.33 * 50},
135
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.2 * 50},
136
+ {'metric': 'Adequate Length (Anna)', 'value': 1.33 * 50},
137
+ ],
138
+ 'performance': [
139
+ {'task': 'Task #1', 'arthur': 6, 'anna': 6},
140
+ {'task': 'Task #11', 'arthur': 0, 'anna': 6},
141
+ {'task': 'Task #15', 'arthur': 0, 'anna': 0},
142
+ {'task': 'Task #18', 'arthur': 6, 'anna': 0},
143
+ {'task': 'Task #20', 'arthur': 0, 'anna': 6},
144
+ ],
145
+ 'strengths': [
146
+ 'Balanced helpfulness scores from both evaluators',
147
+ 'Consistent adequate length ratings',
148
+ 'Higher pass rate from Anna than from Arthur',
149
+ ],
150
+ 'weaknesses': ['Lower overall pass rates', 'Inconsistent evaluation between tasks', 'Below-average scores on complex tasks'],
151
+ },
152
+ 'DeepSeek': {
153
+ 'metrics': [
154
+ {'metric': 'Pass Rate (Arthur)', 'value': 75},
155
+ {'metric': 'Pass Rate (Anna)', 'value': 100},
156
+ {'metric': 'Helpfulness (Arthur)', 'value': 1.33 * 50},
157
+ {'metric': 'Helpfulness (Anna)', 'value': 2.0 * 50},
158
+ {'metric': 'Adequate Length (Arthur)', 'value': 2.0 * 50},
159
+ {'metric': 'Adequate Length (Anna)', 'value': 1.67 * 50},
160
+ ],
161
+ 'performance': [
162
+ {'task': 'Task #11', 'arthur': 6, 'anna': 6},
163
+ {'task': 'Task #13', 'arthur': 6, 'anna': 0},
164
+ {'task': 'Task #18', 'arthur': 6, 'anna': 6},
165
+ {'task': 'Task #19', 'arthur': 0, 'anna': 6},
166
+ ],
167
+ 'strengths': [
168
+ 'Perfect pass rate from Anna (100%)',
169
+ 'Highest helpfulness rating from Anna (2.0/2.0)',
170
+ 'Highest adequate length rating from Arthur (2.0/2.0)',
171
+ 'Strong overall performance across metrics',
172
+ ],
173
+ 'weaknesses': ['Some inconsistency between evaluators', 'Lower pass rate from Arthur compared to Anna'],
174
+ },
175
+ }
176
+ return (platforms_data,)
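The metric dictionaries above follow one convention: pass rates are stored as percentages, while the 0-2 helpfulness and adequate-length ratings are pre-multiplied by 50 (the "Scaling to 0-100" comment) so every metric shares a 0-100 axis; `platform_summary` later divides by 50 to recover the 0-2 rating. A minimal sketch of that round trip, with illustrative variable names only:

```python
# Assumed convention from the cell above: ratings on a 0-2 scale are mapped to the
# 0-100 plotting axis by multiplying by 50; dividing by 50 recovers the raw rating.
raw_rating = 1.5                      # e.g. Helpfulness (Arthur) for Chat GPT, on the 0-2 scale
stored_value = raw_rating * 50        # 75.0, comparable with the pass-rate percentages
recovered_rating = stored_value / 50  # 1.5, the value the threshold checks reason about
assert recovered_rating == raw_rating
```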
177
 
 
 
 
178
 
179
+ @app.cell
180
+ def _(platforms_data):
181
+ # Complete the platform data with GC AI, Notebook LM, and Vecflow
182
+ platforms_data.update(
183
+ {
184
+ 'GC AI': {
185
+ 'metrics': [
186
+ {'metric': 'Pass Rate (Arthur)', 'value': 60},
187
+ {'metric': 'Pass Rate (Anna)', 'value': 40},
188
+ {'metric': 'Helpfulness (Arthur)', 'value': 1.4 * 50},
189
+ {'metric': 'Helpfulness (Anna)', 'value': 0.5 * 50},
190
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.8 * 50},
191
+ {'metric': 'Adequate Length (Anna)', 'value': 1.0 * 50},
192
+ ],
193
+ 'performance': [
194
+ {'task': 'Task #6', 'arthur': 6, 'anna': 0},
195
+ {'task': 'Task #13', 'arthur': 0, 'anna': 0},
196
+ {'task': 'Task #18', 'arthur': 6, 'anna': 6},
197
+ {'task': 'Task #19', 'arthur': 0, 'anna': 0},
198
+ {'task': 'Task #20', 'arthur': 6, 'anna': 0},
199
+ ],
200
+ 'strengths': [
201
+ 'Good adequate length rating from Arthur',
202
+ 'Decent pass rate from Arthur (60%)',
203
+ 'Solid helpfulness score from Arthur',
204
+ ],
205
+ 'weaknesses': [
206
+ 'Lowest helpfulness rating from Anna (0.5/2.0)',
207
+ 'Largest discrepancy between evaluators',
208
+ 'Lower pass rate from Anna (40%)',
209
+ ],
210
+ },
211
+ 'Notebook LM': {
212
+ 'metrics': [
213
+ {'metric': 'Pass Rate (Arthur)', 'value': 60},
214
+ {'metric': 'Pass Rate (Anna)', 'value': 60},
215
+ {'metric': 'Helpfulness (Arthur)', 'value': 0.8 * 50},
216
+ {'metric': 'Helpfulness (Anna)', 'value': 1.2 * 50},
217
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.6 * 50},
218
+ {'metric': 'Adequate Length (Anna)', 'value': 2.0 * 50},
219
+ ],
220
+ 'performance': [
221
+ {'task': 'Task #3', 'arthur': 6, 'anna': 0},
222
+ {'task': 'Task #6', 'arthur': 0, 'anna': 0},
223
+ {'task': 'Task #11', 'arthur': 0, 'anna': 6},
224
+ {'task': 'Task #13', 'arthur': 6, 'anna': 6},
225
+ {'task': 'Task #15', 'arthur': 6, 'anna': 6},
226
+ {'task': 'Task #19', 'arthur': 6, 'anna': 6},
227
+ ],
228
+ 'strengths': [
229
+ 'Perfect agreement between Arthur and Anna on pass/fail',
230
+ 'Highest adequate length rating from Anna (2.0/2.0)',
231
+ 'Consistent pass rate between evaluators (60%)',
232
+ ],
233
+ 'weaknesses': [
234
+ 'Lower helpfulness rating from Arthur (0.8/2.0)',
235
+ 'Mixed performance in specific tasks',
236
+ ],
237
+ },
238
+ 'Vecflow': {
239
+ 'metrics': [
240
+ {'metric': 'Pass Rate (Arthur)', 'value': 60},
241
+ {'metric': 'Pass Rate (Anna)', 'value': 40},
242
+ {'metric': 'Helpfulness (Arthur)', 'value': 0.6 * 50},
243
+ {'metric': 'Helpfulness (Anna)', 'value': 0.6 * 50},
244
+ {'metric': 'Adequate Length (Arthur)', 'value': 1.8 * 50},
245
+ {'metric': 'Adequate Length (Anna)', 'value': 1.4 * 50},
246
+ ],
247
+ 'performance': [
248
+ {'task': 'Task #11', 'arthur': 0, 'anna': 6},
249
+ {'task': 'Task #13', 'arthur': 6, 'anna': 0},
250
+ {'task': 'Task #15', 'arthur': 6, 'anna': 0},
251
+ {'task': 'Task #18', 'arthur': 6, 'anna': 6},
252
+ {'task': 'Task #19', 'arthur': 0, 'anna': 0},
253
+ ],
254
+ 'strengths': [
255
+ 'Perfect agreement on helpfulness between evaluators',
256
+ 'Strong adequate length scores from both evaluators',
257
+ 'Good performance in specialized tasks',
258
+ ],
259
+ 'weaknesses': [
260
+ 'Lowest helpfulness rating overall (0.6/2.0)',
261
+ 'Lower pass rate from Anna (40%)',
262
+ 'Inconsistent evaluation on complex tasks',
263
+ ],
264
+ },
265
+ }
266
  )
267
  return
268
 
269
 
270
+ @app.cell
271
+ def _(pd):
272
+ # Task type data
273
+ task_type_data = pd.DataFrame(
274
+ [
275
+ {'name': 'Simple Extraction', 'arthur': 80, 'anna': 70},
276
+ {'name': 'Complex Analysis', 'arthur': 65, 'anna': 60},
277
+ {'name': 'Regulatory/Legal', 'arthur': 50, 'anna': 40},
278
+ {'name': 'Identification', 'arthur': 90, 'anna': 75},
279
+ {'name': 'Summarization', 'arthur': 70, 'anna': 65},
280
+ ]
281
  )
282
+
283
+ # Platform performance over time data
284
+ trend_data = {
285
+ 'Chat GPT': [
286
+ {'task': 1, 'arthur': 6, 'anna': 0},
287
+ {'task': 3, 'arthur': 6, 'anna': 0},
288
+ {'task': 11, 'arthur': 6, 'anna': 0},
289
+ {'task': 13, 'arthur': 6, 'anna': 6},
290
+ {'task': 18, 'arthur': 6, 'anna': 6},
291
+ ],
292
+ 'CoPilot': [
293
+ {'task': 1, 'arthur': 6, 'anna': 6},
294
+ {'task': 11, 'arthur': 0, 'anna': 6},
295
+ {'task': 15, 'arthur': 0, 'anna': 0},
296
+ {'task': 18, 'arthur': 6, 'anna': 0},
297
+ {'task': 20, 'arthur': 0, 'anna': 6},
298
+ ],
299
+ 'DeepSeek': [
300
+ {'task': 11, 'arthur': 6, 'anna': 6},
301
+ {'task': 13, 'arthur': 6, 'anna': 0},
302
+ {'task': 18, 'arthur': 6, 'anna': 6},
303
+ {'task': 19, 'arthur': 0, 'anna': 6},
304
+ ],
305
+ 'GC AI': [
306
+ {'task': 6, 'arthur': 6, 'anna': 0},
307
+ {'task': 13, 'arthur': 0, 'anna': 0},
308
+ {'task': 18, 'arthur': 6, 'anna': 6},
309
+ {'task': 19, 'arthur': 0, 'anna': 0},
310
+ {'task': 20, 'arthur': 6, 'anna': 0},
311
+ ],
312
+ 'Notebook LM': [
313
+ {'task': 3, 'arthur': 6, 'anna': 0},
314
+ {'task': 6, 'arthur': 0, 'anna': 0},
315
+ {'task': 11, 'arthur': 0, 'anna': 6},
316
+ {'task': 13, 'arthur': 6, 'anna': 6},
317
+ {'task': 15, 'arthur': 6, 'anna': 6},
318
+ {'task': 19, 'arthur': 6, 'anna': 6},
319
+ ],
320
+ 'Vecflow': [
321
+ {'task': 11, 'arthur': 0, 'anna': 6},
322
+ {'task': 13, 'arthur': 6, 'anna': 0},
323
+ {'task': 15, 'arthur': 6, 'anna': 0},
324
+ {'task': 18, 'arthur': 6, 'anna': 6},
325
+ {'task': 19, 'arthur': 0, 'anna': 0},
326
+ ],
327
+ }
328
+
329
+ # Map pass/fail values to binary for plotting
330
+ mapped_trend_data = {}
331
+ for platform, data in trend_data.items():
332
+ mapped_trend_data[platform] = [
333
+ {'task': item['task'], 'arthur': 1 if item['arthur'] == 6 else 0, 'anna': 1 if item['anna'] == 6 else 0} for item in data
334
+ ]
335
+ return data, mapped_trend_data, platform, task_type_data, trend_data
336
+
337
+
338
+ @app.cell
339
+ def _(alt, mapped_trend_data, pd):
340
+ def plot_task_performance_interactive(platform_name):
341
+ """Create an interactive line chart for task performance"""
342
+
343
+ # Convert to DataFrame
344
+ data = pd.DataFrame(mapped_trend_data[platform_name])
345
+
346
+ # Melt the dataframe for Altair
347
+ data_melted = data.melt(id_vars=['task'], var_name='evaluator', value_name='result')
348
+
349
+ # Create a color scale
350
+ color_scale = alt.Scale(domain=['arthur', 'anna'], range=['#4c78a8', '#ff7f0e'])
351
+
352
+ # Create the chart
353
+ chart = (
354
+ alt.Chart(data_melted)
355
+ .mark_line(point=True)
356
+ .encode(
357
+ x=alt.X('task:N', title='Task Number'),
358
+ y=alt.Y(
359
+ 'result:N', title='Result', scale=alt.Scale(domain=[0, 1]), axis=alt.Axis(labelExpr="datum.value === 0 ? 'Fail' : 'Pass'")
360
+ ),
361
+ color=alt.Color('evaluator:N', title='Evaluator', scale=color_scale, legend=alt.Legend(title='Evaluator')),
362
+ tooltip=['task', 'evaluator', alt.Tooltip('result', title='Result', format='.0f', formatType='number')],
363
+ )
364
+ .transform_calculate(result_label="datum.result === 0 ? 'Fail' : 'Pass'")
365
+ .properties(width=500, height=300, title=f'{platform_name} Task Performance')
366
+ .configure_title(fontSize=20, anchor='start')
367
+ .configure_axis(labelFontSize=12, titleFontSize=14)
368
+ .configure_point(size=100)
369
+ .interactive()
370
+ )
371
+
372
+ return chart
373
+ return (plot_task_performance_interactive,)
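Before encoding, these chart helpers reshape each platform's per-task records with pandas `melt`, turning one wide row per task into one long row per (task, evaluator) pair, which is the layout Altair needs for the evaluator color channel. A small self-contained sketch of that step (`example_wide` and `example_long` are illustrative names only):

```python
import pandas as pd

# One wide row per task, as in mapped_trend_data after the pass/fail mapping.
example_wide = pd.DataFrame([{"task": 11, "arthur": 1, "anna": 1}])

# Long format: columns task / evaluator / result, one row per (task, evaluator) pair.
example_long = example_wide.melt(id_vars=["task"], var_name="evaluator", value_name="result")
print(example_long)  # rows: (11, "arthur", 1) and (11, "anna", 1)
```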
374
+
375
+
376
+ @app.cell
377
+ def _(alt, task_type_data):
378
+ def plot_task_type_performance_interactive():
379
+ """Create an interactive bar chart for task type performance"""
380
+
381
+ # Melt the dataframe for Altair
382
+ task_type_melted = task_type_data.melt(id_vars=['name'], var_name='evaluator', value_name='score')
383
+
384
+ # Create a color scale
385
+ color_scale = alt.Scale(domain=['arthur', 'anna'], range=['#4c78a8', '#ff7f0e'])
386
+
387
+ # Create the chart
388
+ chart = (
389
+ alt.Chart(task_type_melted)
390
+ .mark_bar()
391
+ .encode(
392
+ x=alt.X('name:N', title='Task Type', axis=alt.Axis(labelAngle=-45)),
393
+ y=alt.Y('score:Q', title='Average Score (%)'),
394
+ color=alt.Color('evaluator:N', title='Evaluator', scale=color_scale),
395
+ tooltip=['name', 'evaluator', alt.Tooltip('score', title='Score', format='.0f')],
396
+ )
397
+ .properties(width=600, height=400, title='Task Type Performance Analysis')
398
+ .configure_title(fontSize=20, anchor='start')
399
+ .configure_axis(labelFontSize=12, titleFontSize=14)
400
+ .interactive()
401
+ )
402
+
403
+ return chart
404
+ return (plot_task_type_performance_interactive,)
405
+
406
+
407
+ @app.cell
408
+ def _(
409
+ display_platform_evaluation,
410
+ platform_summary,
411
+ plot_platform_radar_interactive,
412
+ plot_task_performance_interactive,
413
+ ):
414
+ def analyze_platform_interactive(platform_name='DeepSeek'):
415
+ """Create a comprehensive interactive analysis for a single platform"""
416
+ from IPython.display import display, HTML, Markdown
417
+
418
+ # Display the platform name
419
+ display(Markdown(f'# AI Platform In-Depth Analysis: {platform_name}'))
420
+ # Create the radar chart for metrics
421
+ display(Markdown('## Performance Metrics'))
422
+ display(plot_platform_radar_interactive(platform_name))
423
+ # Show task performance
424
+ display(Markdown('## Task Performance'))
425
+ display(plot_task_performance_interactive(platform_name))
426
+ # Display strengths and weaknesses
427
+ display(Markdown('## Platform Evaluation'))
428
+ display_platform_evaluation(platform_name)
429
+ # Show platform summary
430
+ display(Markdown('## Platform Summary'))
431
+ platform_summary(platform_name)
432
+ return None
433
+ return (analyze_platform_interactive,)
434
+
435
+
436
+ @app.cell
437
+ def _(analyze_platform_interactive):
438
+ # Analyze Vecflow
439
+ analyze_platform_interactive('Vecflow')
440
  return
441
 
442
 
443
+ @app.cell
444
+ def _(analyze_platform_interactive):
445
+ # Analyze Vecflow
446
+ analyze_platform_interactive('Vecflow')
 
 
 
 
447
  return
448
 
449
 
450
+ @app.cell
451
+ def _(analyze_platform_interactive):
452
+ # Analyze Notebook LM
453
+ analyze_platform_interactive('Notebook LM')
454
+ return
455
 
 
 
456
 
457
+ @app.cell
458
+ def _(analyze_platform_interactive):
459
+ # Analyze GC AI
460
+ analyze_platform_interactive('GC AI')
461
+ return
462
 
463
+
464
+ @app.cell
465
+ def _(analyze_platform_interactive):
466
+ # Analyze CoPilot
467
+ analyze_platform_interactive('CoPilot')
468
  return
469
 
470
 
471
+ @app.cell
472
+ def _(analyze_platform_interactive):
473
+ # Analyze Chat GPT
474
+ analyze_platform_interactive('Chat GPT')
475
+ return
 
476
 
 
 
 
477
 
478
+ @app.cell
479
+ def _(analyze_platform_interactive):
480
+ # Analyze DeepSeek
481
+ analyze_platform_interactive('DeepSeek')
482
  return
483
 
484
 
485
  @app.cell
486
+ def _(compare_all_platforms_interactive):
487
+ # Compare all platforms
488
+ compare_all_platforms_interactive()
489
+ return
490
 
491
 
492
+ @app.cell
493
+ def _(
494
+ compare_platforms_interactive,
495
+ pd,
496
+ platforms_data,
497
+ plot_task_type_performance_interactive,
498
+ ):
499
+ def compare_all_platforms_interactive():
500
+ """Display interactive comparison of all platforms"""
501
+
502
+ from IPython.display import display, Markdown
503
+
504
+ # Display the title
505
+ display(Markdown('# AI Platform Comparison'))
506
+
507
+ # Show interactive comparison chart
508
+ display(Markdown('## Metrics Comparison'))
509
+ display(compare_platforms_interactive())
510
+
511
+ # Show task type performance
512
+ display(Markdown('## Task Type Performance'))
513
+ display(plot_task_type_performance_interactive())
514
+
515
+ # Overall rankings
516
+ display(Markdown('## Overall Platform Rankings'))
517
+
518
+ # Calculate average metrics for each platform
519
+ rankings = []
520
+ for platform, data in platforms_data.items():
521
+ avg_metrics = sum(metric['value'] for metric in data['metrics']) / len(data['metrics'])
522
+ rankings.append({'Platform': platform, 'Average Score': avg_metrics})
523
+
524
+ rankings_df = pd.DataFrame(rankings)
525
+ rankings_df.sort_values('Average Score', ascending=False, inplace=True)
526
+
527
+ # Create a DataFrame to display rankings
528
+ for i, (idx, row) in enumerate(rankings_df.iterrows(), 1):
529
+ print(f'{i}. {row["Platform"]} - Average Score: {row["Average Score"]:.2f}')
530
+
531
+ return None
532
+ return (compare_all_platforms_interactive,)
533
+
534
+
535
+ @app.cell
536
+ def _(alt, pd, platforms_data):
537
+ def compare_platforms_interactive():
538
+ """Create an interactive chart for comparing all platforms"""
539
+
540
+ # Create a DataFrame with all platform metrics
541
+ metrics_comparison = []
542
+
543
+ for platform, data in platforms_data.items():
544
+ for metric in data['metrics']:
545
+ metrics_comparison.append({'Platform': platform, 'Metric': metric['metric'], 'Value': metric['value']})
546
+
547
+ comparison_df = pd.DataFrame(metrics_comparison)
548
+
549
+ # Create a grouped bar chart
550
+ chart = (
551
+ alt.Chart(comparison_df)
552
+ .mark_bar()
553
+ .encode(
554
+ x=alt.X('Platform:N', title='Platform'),
555
+ y=alt.Y('Value:Q', title='Score'),
556
+ color=alt.Color('Platform:N', legend=None),
557
+ column=alt.Column('Metric:N', title=None),
558
+ tooltip=['Platform', 'Metric', 'Value'],
559
  )
560
+ .properties(width=100, title='Platform Metric Comparison')
561
+ .configure_title(fontSize=20, anchor='start')
562
+ .configure_axis(labelFontSize=12, titleFontSize=14)
563
+ .interactive()
564
+ )
565
+
566
+ return chart
567
+ return (compare_platforms_interactive,)
568
+
569
+
570
+ @app.cell
571
+ def _(alt, pd, platforms_data):
572
+ def plot_platform_radar_interactive(platform_name):
573
+ """Create an interactive radar chart for platform metrics using Altair"""
574
+
575
+ # Get platform metrics data
576
+ metrics = platforms_data[platform_name]['metrics']
577
+
578
+ # Convert to long format for Altair
579
+ metrics_df = pd.DataFrame(metrics)
580
+
581
+ # Create the base chart
582
+ chart = (
583
+ alt.Chart(metrics_df)
584
+ .mark_line(point=True)
585
+ .encode(
586
+ x=alt.X('metric:N', title=None, sort=None),
587
+ y=alt.Y('value:Q', scale=alt.Scale(domain=[0, 100]), title='Score'),
588
+ color=alt.value('#4c78a8'),
589
+ tooltip=['metric', 'value'],
590
+ )
591
+ .properties(width=500, height=400, title=f'{platform_name} Performance Metrics')
592
+ .configure_title(fontSize=20, anchor='start')
593
+ .configure_axis(labelFontSize=12, titleFontSize=14)
594
+ .configure_point(size=100)
595
+ .interactive()
596
+ )
597
+
598
+ return chart
599
+ return (plot_platform_radar_interactive,)
600
+
601
+
602
+ @app.cell
603
+ def _(alt):
604
+ alt.renderers.enable('default')
605
  return
606
 
607
 
608
+ @app.cell
609
+ def _(compare_all_platforms):
610
+ # Compare all platforms
611
+ compare_all_platforms()
 
 
 
 
 
612
  return
613
 
614
 
615
+ @app.cell
616
+ def _(pd, platforms_data, plot_task_type_performance, plt):
617
+ def compare_all_platforms():
618
+ """Display comparison of all platforms"""
619
+
620
+ # Create a DataFrame with all platform metrics for comparison
621
+ metrics_comparison = []
622
+
623
+ for platform, data in platforms_data.items():
624
+ # Extract metrics
625
+ platform_metrics = {metric['metric']: metric['value'] for metric in data['metrics']}
626
+ platform_metrics['Platform'] = platform
627
+ metrics_comparison.append(platform_metrics)
628
+
629
+ comparison_df = pd.DataFrame(metrics_comparison)
630
+ comparison_df.set_index('Platform', inplace=True)
631
+
632
+ # Display the comparison table
633
+ print('# AI Platform Comparison\n')
634
+ print('## Metrics Comparison')
635
+ print(comparison_df)
636
+
637
+ # Create a bar chart to compare platforms
638
+ plt.figure(figsize=(14, 8))
639
+ comparison_df.plot(kind='bar', figsize=(14, 8))
640
+ plt.title('Platform Metrics Comparison')
641
+ plt.xlabel('Platform')
642
+ plt.ylabel('Score')
643
+ plt.legend(title='Metrics', bbox_to_anchor=(1.05, 1), loc='upper left')
644
+ plt.tight_layout()
645
+
646
+ print('\n## Task Type Performance')
647
+ plot_task_type_performance()
648
+
649
+ # Overall rankings
650
+ print('\n## Overall Platform Rankings')
651
+
652
+ # Calculate average metrics for each platform
653
+ rankings = []
654
+ for platform, data in platforms_data.items():
655
+ avg_metrics = sum(metric['value'] for metric in data['metrics']) / len(data['metrics'])
656
+ rankings.append({'Platform': platform, 'Average Score': avg_metrics})
657
+
658
+ rankings_df = pd.DataFrame(rankings)
659
+ rankings_df.sort_values('Average Score', ascending=False, inplace=True)
660
+
661
+ # Display rankings
662
+ for i, (idx, row) in enumerate(rankings_df.iterrows(), 1):
663
+ print(f'{i}. {row["Platform"]} - Average Score: {row["Average Score"]:.2f}')
664
+
665
+ return plt.gca()
666
+ return (compare_all_platforms,)
667
+
668
+
669
+ @app.cell
670
+ def _(compare_all_platforms):
671
+ # Compare all platforms
672
+ compare_all_platforms()
673
  return
674
 
675
 
676
+ @app.cell
677
+ def _(platforms_data):
678
+ def platform_summary(platform_name):
679
+ """Display a summary of the platform performance"""
680
+
681
+ summaries = {
682
+ 'DeepSeek': 'DeepSeek shows the strongest overall performance across both evaluators, with a perfect pass rate from Anna and high marks on both helpfulness and adequate length metrics. It consistently delivers high-quality responses across various task types.',
683
+ 'Chat GPT': "Chat GPT performs excellently according to Arthur with a perfect pass rate, but shows inconsistency with Anna's evaluations. Its strengths lie in helpfulness and adequate response length, particularly in extraction and summarization tasks.",
684
+ 'Notebook LM': 'Notebook LM demonstrates the highest level of evaluator agreement with identical pass rates from Arthur and Anna. It excels in adequate length ratings but scores lower on helpfulness metrics from Arthur.',
685
+ 'CoPilot': 'CoPilot shows moderate performance across metrics with slightly higher ratings from Anna than Arthur. It maintains consistency in adequate length but struggles with more complex analysis tasks.',
686
+ 'GC AI': 'GC AI exhibits the largest discrepancy between evaluator ratings, with Arthur giving significantly higher scores than Anna across all metrics. It performs well in adequate length according to Arthur but scores poorly in helpfulness from Anna.',
687
+ 'Vecflow': 'Vecflow demonstrates perfect agreement on helpfulness ratings between evaluators, though these scores are the lowest across all platforms. It excels in adequate length metrics but shows inconsistent pass rates between evaluators.',
688
  }
689
+
690
+ # Create tags for the platform
691
+ tags = []
692
+ metrics = platforms_data[platform_name]['metrics']
693
+
694
+ tags.append(f'📊 {platform_name}')
695
+
696
+ if metrics[0]['value'] >= 60:
697
+ tags.append('🟢 High Arthur Pass Rate')
698
+
699
+ if metrics[1]['value'] >= 60:
700
+ tags.append('🟢 High Anna Pass Rate')
701
+
702
+ if metrics[2]['value'] / 50 >= 1.3:
703
+ tags.append('🟣 Strong Helpfulness (Arthur)')
704
+
705
+ if metrics[3]['value'] / 50 >= 1.3:
706
+ tags.append('🟣 Strong Helpfulness (Anna)')
707
+
708
+ if metrics[4]['value'] / 50 >= 1.7:
709
+ tags.append('🔵 Excellent Length (Arthur)')
710
+
711
+ if metrics[5]['value'] / 50 >= 1.7:
712
+ tags.append('🔵 Excellent Length (Anna)')
713
+
714
+ if metrics[0]['value'] == metrics[1]['value']:
715
+ tags.append('🟡 Evaluator Agreement')
716
+
717
+ print(f'== {platform_name} Summary ==\n')
718
+ print(summaries[platform_name])
719
+ print('\nTags:')
720
+ print(' '.join(tags))
721
+
722
+ return None
723
+ return (platform_summary,)
724
+
725
+
726
+ @app.cell
727
+ def _(np, platforms_data, plt):
728
+ def plot_platform_radar(platform_name):
729
+ """Create a radar chart for platform metrics with enhanced styling"""
730
+ metrics = platforms_data[platform_name]['metrics']
731
+
732
+ # Extract data
733
+ categories = [m['metric'] for m in metrics]
734
+ values = [m['value'] for m in metrics]
735
+
736
+ # Number of categories
737
+ N = len(categories)
738
+
739
+ # Create angle for each category
740
+ angles = [n / float(N) * 2 * np.pi for n in range(N)]
741
+ angles += angles[:1] # Close the loop
742
+
743
+ # Add the first value at the end to close the circle
744
+ values += values[:1]
745
+
746
+ # Create figure
747
+ fig, ax = plt.subplots(figsize=(10, 6), subplot_kw=dict(polar=True), facecolor='#f8f9fa')
748
+
749
+ # Draw the chart
750
+ ax.plot(angles, values, linewidth=2, linestyle='solid', label=platform_name, color='#8884d8')
751
+ ax.fill(angles, values, alpha=0.25, color='#8884d8')
752
+
753
+ # Set category labels
754
+ plt.xticks(angles[:-1], categories, size=10, fontweight='bold', color='#444444')
755
+
756
+ # Set y-axis limits
757
+ ax.set_ylim(0, 100)
758
+
759
+ # Add grid
760
+ ax.grid(color='#dddddd', linestyle='-', linewidth=0.5)
761
+
762
+ # Set background color for each level
763
+ ax.set_facecolor('#f8f9fa')
764
+
765
+ # Add title with platform-specific color
766
+ platform_colors = {
767
+ 'DeepSeek': '#6b5b95',
768
+ 'Chat GPT': '#3498db',
769
+ 'CoPilot': '#f39c12',
770
+ 'GC AI': '#1abc9c',
771
+ 'Notebook LM': '#e74c3c',
772
+ 'Vecflow': '#9b59b6',
773
+ }
774
+ color = platform_colors.get(platform_name, '#8884d8')
775
+ plt.title(f'{platform_name} Performance Metrics', size=16, fontweight='bold', color=color, pad=20)
776
+
777
+ # Add legend
778
+ plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1), frameon=True, facecolor='white', edgecolor='#dddddd')
779
+
780
+ plt.tight_layout()
781
+ return plt.gca()
782
+ return (plot_platform_radar,)
783
 
784
 
785
+ @app.cell
786
+ def _(mapped_trend_data, pd, plt, sns):
787
+ def plot_task_performance(platform_name):
788
+ """Create an enhanced line chart for task performance"""
789
+ # Convert to DataFrame
790
+ data = pd.DataFrame(mapped_trend_data[platform_name])
791
+
792
+ # Set a theme
793
+ sns.set_style('whitegrid')
794
+ plt.figure(figsize=(10, 6), facecolor='#f8f9fa')
795
+
796
+ # Platform-specific colors
797
+ platform_colors = {
798
+ 'DeepSeek': ('#6b5b95', '#d64161'),
799
+ 'Chat GPT': ('#3498db', '#1abc9c'),
800
+ 'CoPilot': ('#f39c12', '#e67e22'),
801
+ 'GC AI': ('#1abc9c', '#16a085'),
802
+ 'Notebook LM': ('#e74c3c', '#c0392b'),
803
+ 'Vecflow': ('#9b59b6', '#8e44ad'),
804
+ }
805
+
806
+ arthur_color, anna_color = platform_colors.get(platform_name, ('#8884d8', '#82ca9d'))
807
+
808
+ # Plot lines with enhanced styling
809
+ plt.plot(
810
+ data['task'],
811
+ data['arthur'],
812
+ marker='o',
813
+ markersize=10,
814
+ linestyle='-',
815
+ linewidth=2.5,
816
+ label="Arthur's Evaluation",
817
+ color=arthur_color,
818
+ alpha=0.9,
819
+ )
820
+
821
+ plt.plot(
822
+ data['task'],
823
+ data['anna'],
824
+ marker='s',
825
+ markersize=10,
826
+ linestyle='-',
827
+ linewidth=2.5,
828
+ label="Anna's Evaluation",
829
+ color=anna_color,
830
+ alpha=0.9,
831
+ )
832
+
833
+ # Customize plot
834
+ plt.title(f'{platform_name} Task Performance', fontsize=16, fontweight='bold')
835
+ plt.xlabel('Task Number', fontsize=12, fontweight='bold')
836
+ plt.ylabel('Result', fontsize=12, fontweight='bold')
837
+
838
+ # Set y-axis to show Pass/Fail instead of 1/0
839
+ plt.yticks([0, 1], ['Fail', 'Pass'], fontsize=12)
840
+
841
+ # Ensure x-axis shows integer task numbers
842
+ plt.xticks(data['task'], fontsize=11)
843
+
844
+ plt.grid(True, linestyle='--', alpha=0.7)
845
+
846
+ # Enhanced legend
847
+ legend = plt.legend(
848
+ loc='upper center', bbox_to_anchor=(0.5, -0.15), facecolor='white', edgecolor='#dddddd', shadow=True, ncol=2, fontsize=12
849
+ )
850
+
851
+ # Add a border to the plot
852
+ ax = plt.gca()
853
+ for spine in ax.spines.values():
854
+ spine.set_edgecolor('#dddddd')
855
+ spine.set_linewidth(1.5)
856
+
857
+ plt.tight_layout()
858
+ return plt.gca()
859
+ return (plot_task_performance,)
860
+
861
+
862
+ @app.cell
863
+ def _(platforms_data):
864
+ def display_platform_evaluation(platform_name):
865
+ """Display platform strengths and weaknesses with HTML styling"""
866
+ strengths = platforms_data[platform_name]['strengths']
867
+ weaknesses = platforms_data[platform_name]['weaknesses']
868
+
869
+ # Platform-specific color
870
+ platform_colors = {
871
+ 'DeepSeek': '#6b5b95',
872
+ 'Chat GPT': '#3498db',
873
+ 'CoPilot': '#f39c12',
874
+ 'GC AI': '#1abc9c',
875
+ 'Notebook LM': '#e74c3c',
876
+ 'Vecflow': '#9b59b6',
877
+ }
878
+ color = platform_colors.get(platform_name, '#8884d8')
879
+
880
+ html_output = f"""
881
+ <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; border: 1px solid #dddddd; margin: 15px 0;">
882
+ <h2 style="color: {color}; text-align: center; margin-bottom: 20px; border-bottom: 2px solid {color}; padding-bottom: 10px;">
883
+ {platform_name} Evaluation
884
+ </h2>
885
+
886
+ <div style="display: flex; flex-wrap: wrap; gap: 20px;">
887
+ <div style="flex: 1; min-width: 300px; background-color: white; border-radius: 8px; padding: 15px; border: 1px solid #eaeaea; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
888
+ <h3 style="color: #28a745; margin-bottom: 15px; border-bottom: 1px solid #eaeaea; padding-bottom: 8px;">Key Strengths</h3>
889
+ <ul style="list-style-type: none; padding-left: 5px; margin-bottom: 0;">
890
  """
 
891
 
892
+ for strength in strengths:
893
+ html_output += f'<li style="margin-bottom: 10px; display: flex; align-items: center;"><span style="color: #28a745; margin-right: 10px; font-size: 18px;">✅</span> {strength}</li>'
 
 
894
 
895
+ html_output += """
896
+ </ul>
897
+ </div>
898
+
899
+ <div style="flex: 1; min-width: 300px; background-color: white; border-radius: 8px; padding: 15px; border: 1px solid #eaeaea; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
900
+ <h3 style="color: #dc3545; margin-bottom: 15px; border-bottom: 1px solid #eaeaea; padding-bottom: 8px;">Areas for Improvement</h3>
901
+ <ul style="list-style-type: none; padding-left: 5px; margin-bottom: 0;">
902
  """
903
+
904
+ for weakness in weaknesses:
905
+ html_output += f'<li style="margin-bottom: 10px; display: flex; align-items: center;"><span style="color: #dc3545; margin-right: 10px; font-size: 18px;">⚠️</span> {weakness}</li>'
906
+
907
+ html_output += """
908
+ </ul>
909
+ </div>
910
+ </div>
911
+ </div>
912
+ """
913
+
914
+ from IPython.display import HTML, display
915
+
916
+ display(HTML(html_output))
917
+ return None
918
+ return (display_platform_evaluation,)
919
+
920
+
921
+ @app.cell
922
+ def _(np, plt, sns, task_type_data):
923
+ def plot_task_type_performance():
924
+ """Create an enhanced bar chart for task type performance"""
925
+ # Set a theme
926
+ sns.set_style('whitegrid')
927
+ plt.figure(figsize=(12, 6), facecolor='#f8f9fa')
928
+
929
+ # Customize colors
930
+ colors = {'arthur': '#6b5b95', 'anna': '#d64161'}
931
+
932
+ # Set width of bars
933
+ bar_width = 0.35
934
+
935
+ # Set positions of bars on x-axis
936
+ x = np.arange(len(task_type_data))
937
+
938
+ # Create bars with enhanced styling
939
+ plt.bar(
940
+ x - bar_width / 2,
941
+ task_type_data['arthur'],
942
+ bar_width,
943
+ label="Arthur's Rating",
944
+ color=colors['arthur'],
945
+ edgecolor='white',
946
+ linewidth=1.5,
947
+ alpha=0.9,
948
+ )
949
+
950
+ plt.bar(
951
+ x + bar_width / 2,
952
+ task_type_data['anna'],
953
+ bar_width,
954
+ label="Anna's Rating",
955
+ color=colors['anna'],
956
+ edgecolor='white',
957
+ linewidth=1.5,
958
+ alpha=0.9,
959
+ )
960
+
961
+ # Add labels and title with enhanced styling
962
+ plt.xlabel('Task Type', fontsize=12, fontweight='bold')
963
+ plt.ylabel('Average Score (%)', fontsize=12, fontweight='bold')
964
+ plt.title('Task Type Performance Analysis', fontsize=16, fontweight='bold')
965
+
966
+ # Add xticks on the middle of the group bars with better formatting
967
+ plt.xticks(x, task_type_data['name'], rotation=30, ha='right', fontsize=11, fontweight='bold')
968
+
969
+ # Create enhanced legend
970
+ legend = plt.legend(
971
+ loc='upper center', bbox_to_anchor=(0.5, -0.15), facecolor='white', edgecolor='#dddddd', shadow=True, ncol=2, fontsize=12
972
+ )
973
+
974
+ # Add value labels on top of each bar
975
+ for i, v in enumerate(task_type_data['arthur']):
976
+ plt.text(i - bar_width / 2, v + 2, str(v), ha='center', fontsize=9, fontweight='bold')
977
+
978
+ for i, v in enumerate(task_type_data['anna']):
979
+ plt.text(i + bar_width / 2, v + 2, str(v), ha='center', fontsize=9, fontweight='bold')
980
+
981
+ # Add grid
982
+ plt.grid(True, linestyle='--', alpha=0.7, axis='y')
983
+
984
+ # Add a border to the plot
985
+ ax = plt.gca()
986
+ for spine in ax.spines.values():
987
+ spine.set_edgecolor('#dddddd')
988
+ spine.set_linewidth(1.5)
989
+
990
+ # Adjust layout
991
+ plt.tight_layout()
992
+
993
+ return plt.gca()
994
+ return (plot_task_type_performance,)
995
 
996
 
997
  @app.cell
998
+ def _(
999
+ display_platform_evaluation,
1000
+ platform_summary,
1001
+ plot_platform_radar,
1002
+ plot_task_performance,
1003
+ ):
1004
+ def analyze_platform(platform_name='DeepSeek'):
1005
+ """Create a comprehensive analysis for a single platform"""
1006
+
1007
+ # Display the platform name
1008
+ print(f'# AI Platform In-Depth Analysis: {platform_name}\n')
1009
+
1010
+ # Create the radar chart for metrics
1011
+ print('## Performance Metrics')
1012
+ plot_platform_radar(platform_name)
1013
+
1014
+ # Show task performance
1015
+ print('\n## Task Performance')
1016
+ plot_task_performance(platform_name)
1017
+
1018
+ # Display strengths and weaknesses
1019
+ print('\n## Platform Evaluation')
1020
+ display_platform_evaluation(platform_name)
1021
+
1022
+ # Show platform summary
1023
+ print('\n## Platform Summary')
1024
+ platform_summary(platform_name)
1025
+
1026
+ return None
1027
+ return (analyze_platform,)
1028
+
1029
+
1030
+ @app.cell
1031
+ def _(compare_all_platforms):
1032
+ # Compare all platforms
1033
+ compare_all_platforms()
1034
  return
1035
 
1036
 
1037
  @app.cell
1038
+ def _(platforms_data):
1039
+ def platform_selector():
1040
+ """Prints available platforms and prompt for selection"""
1041
+ print('Available platforms for analysis:')
1042
+ for i, platform in enumerate(platforms_data.keys(), 1):
1043
+ print(f'{i}. {platform}')
1044
+
1045
+ print('\nTo analyze a platform, run:')
1046
+ print('analyze_platform("platform_name")')
1047
+ print('\nTo compare all platforms, run:')
1048
+ print('compare_all_platforms()')
1049
+
1050
+ return None
1051
+
1052
+
1053
+ # Display available platforms
1054
+ platform_selector()
1055
+ return (platform_selector,)
1056
 
1057
 
1058
  @app.cell
1059
+ def _(compare_all_platforms):
1060
+ compare_all_platforms()
1061
+ return
1062
 
1063
 
1064
  @app.cell
1065
+ def _():
 
1066
  return
1067
 
1068
 
1069
  @app.cell
1070
+ def _(plot_platform_radar_interactive):
1071
+ # Render the interactive radar chart for DeepSeek
1072
+ plot_platform_radar_interactive('DeepSeek')
1073
  return
1074
 
1075
 
1076
+ @app.cell
1077
+ def _(plot_platform_radar_interactive):
1078
+ # Render the interactive radar chart for DeepSeek
1079
+ plot_platform_radar_interactive('DeepSeek')
1080
+ return
1081
 
 
 
 
1082
 
1083
+ @app.cell
1084
+ def _(compare_all_platforms_interactive):
1085
+ # Execute the compare_all_platforms_interactive function
1086
+ compare_all_platforms_interactive()
1087
+ return
1088
 
1089
+
1090
+ @app.cell
1091
+ def _(platform_selector):
1092
+ # Call platform_selector to display available platforms
1093
+ platform_selector()
 
 
 
1094
  return
1095
 
1096
 
1097
+ @app.cell
1098
+ def _():
1099
+ return
 
 
1100
 
 
 
1101
 
1102
+ @app.cell
1103
+ def _(pd):
1104
+ import json
1105
+ from IPython.display import HTML, display
1106
+
1107
+ # Convert the agreement data into a Python structure
1108
+ agreement_data = [
1109
+ {'platform': 'Chat GPT', 'arthurValue': 1.5, 'annaValue': 1.25, 'category': 'Helpfulness'},
1110
+ {'platform': 'CoPilot', 'arthurValue': 1.0, 'annaValue': 1.33, 'category': 'Helpfulness'},
1111
+ {'platform': 'DeepSeek', 'arthurValue': 1.33, 'annaValue': 2.0, 'category': 'Helpfulness'},
1112
+ {'platform': 'GC AI', 'arthurValue': 1.4, 'annaValue': 0.5, 'category': 'Helpfulness'},
1113
+ {'platform': 'Notebook LM', 'arthurValue': 0.8, 'annaValue': 1.2, 'category': 'Helpfulness'},
1114
+ {'platform': 'Vecflow', 'arthurValue': 0.6, 'annaValue': 0.6, 'category': 'Helpfulness'},
1115
+ {'platform': 'Chat GPT', 'arthurValue': 1.75, 'annaValue': 1.25, 'category': 'Adequate Length'},
1116
+ {'platform': 'CoPilot', 'arthurValue': 1.2, 'annaValue': 1.33, 'category': 'Adequate Length'},
1117
+ {'platform': 'DeepSeek', 'arthurValue': 2.0, 'annaValue': 1.67, 'category': 'Adequate Length'},
1118
+ {'platform': 'GC AI', 'arthurValue': 1.8, 'annaValue': 1.0, 'category': 'Adequate Length'},
1119
+ {'platform': 'Notebook LM', 'arthurValue': 1.6, 'annaValue': 2.0, 'category': 'Adequate Length'},
1120
+ {'platform': 'Vecflow', 'arthurValue': 1.8, 'annaValue': 1.4, 'category': 'Adequate Length'},
1121
+ ]
1122
+
1123
+ # Convert pass/fail agreement data
1124
+ pass_fail_agreement = [
1125
+ {'platform': 'Chat GPT', 'arthur': 100, 'anna': 40, 'agreement': 'Disagree'},
1126
+ {'platform': 'CoPilot', 'arthur': 40, 'anna': 60, 'agreement': 'Disagree'},
1127
+ {'platform': 'DeepSeek', 'arthur': 75, 'anna': 100, 'agreement': 'Disagree'},
1128
+ {'platform': 'GC AI', 'arthur': 60, 'anna': 40, 'agreement': 'Disagree'},
1129
+ {'platform': 'Notebook LM', 'arthur': 60, 'anna': 60, 'agreement': 'Agree'},
1130
+ {'platform': 'Vecflow', 'arthur': 60, 'anna': 40, 'agreement': 'Disagree'},
1131
+ ]
1132
+
1133
+
1134
+ # Calculate correlations using pandas for accuracy
1135
+ def calculate_correlations():
1136
+ helpfulness_data = pd.DataFrame([item for item in agreement_data if item['category'] == 'Helpfulness'])
1137
+ adequate_length_data = pd.DataFrame([item for item in agreement_data if item['category'] == 'Adequate Length'])
1138
+ pass_fail_data = pd.DataFrame(pass_fail_agreement)
1139
+
1140
+ helpfulness_correlation = helpfulness_data['arthurValue'].corr(helpfulness_data['annaValue'])
1141
+ adequate_length_correlation = adequate_length_data['arthurValue'].corr(adequate_length_data['annaValue'])
1142
+ pass_rate_correlation = pass_fail_data['arthur'].corr(pass_fail_data['anna'])
1143
+
1144
+ return {
1145
+ 'helpfulness': round(helpfulness_correlation, 2),
1146
+ 'adequate_length': round(adequate_length_correlation, 2),
1147
+ 'pass_rate': round(pass_rate_correlation, 2),
1148
+ }
1149
+
1150
+
1151
+ correlations = calculate_correlations()
1152
+ return (
1153
+ HTML,
1154
+ agreement_data,
1155
+ calculate_correlations,
1156
+ correlations,
1157
+ display,
1158
+ json,
1159
+ pass_fail_agreement,
1160
  )
1161
+
1162
+
1163
+ @app.cell
1164
+ def _(correlations):
1165
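+ # The dict is the last expression before return, so marimo renders it as this cell's output.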
+ correlations
1166
  return
1167
 
1168
 
1169
+ @app.cell
1170
+ def _(
1171
+ agree_count,
1172
+ agreement_data,
1173
+ calculate_average_metrics,
1174
+ correlations,
1175
+ disagree_count,
1176
+ np,
1177
+ pass_fail_agreement,
1178
+ pd,
1179
+ plt,
1180
+ ):
1181
+ def _():
1182
+ def _():
1183
+ def interactive_evaluator_dashboard():
1184
+ """Display an interactive dashboard for evaluator analysis"""
1185
+ from IPython.display import display, Markdown, HTML
1186
+
1187
+ # Display header
1188
+ display(
1189
+ HTML("""
1190
+ <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; text-align: center; margin-bottom: 20px;">
1191
+ <h1 style="color: #333; margin-bottom: 10px;">Evaluator Comparison Analysis</h1>
1192
+ <p style="font-style: italic; color: #666;">Analyzing differences between Arthur's and Anna's evaluations</p>
1193
+ </div>
1194
+ """)
1195
+ )
1196
+
1197
+ # Display Agreement Section
1198
+ display(Markdown('## Agreement Overview'))
1199
+
1200
+ # Create side-by-side visualizations
1201
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 7))
1202
+
1203
+ # Agreement Pie Chart
1204
+ labels = ['Agreement', 'Disagreement']
1205
+ sizes = [agree_count, disagree_count]
1206
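+ # With the pass/fail data above, this is 1 agreement (Notebook LM) vs. 5 disagreements.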
+ colors = ['#4CAF50', '#F44336']
1207
+ explode = (0.1, 0)
1208
+
1209
+ ax1.pie(
1210
+ sizes,
1211
+ explode=explode,
1212
+ labels=labels,
1213
+ colors=colors,
1214
+ autopct='%1.1f%%',
1215
+ shadow=True,
1216
+ startangle=140,
1217
+ textprops={'fontsize': 12, 'fontweight': 'bold'},
1218
+ )
1219
+ ax1.set_title('Evaluator Pass/Fail Agreement', fontsize=16, fontweight='bold')
1220
+
1221
+ # Average Scores Bar Chart
1222
+ avg_df = calculate_average_metrics()
1223
+
1224
+ # Set width of bars
1225
+ bar_width = 0.35
1226
+ x = np.arange(len(avg_df))
1227
+
1228
+ # Create bars
1229
+ ax2.bar(
1230
+ x - bar_width / 2,
1231
+ avg_df['Arthur'],
1232
+ width=bar_width,
1233
+ label="Arthur's Avg",
1234
+ color='#8884d8',
1235
+ edgecolor='white',
1236
+ linewidth=1.5,
1237
+ )
1238
+ ax2.bar(
1239
+ x + bar_width / 2, avg_df['Anna'], width=bar_width, label="Anna's Avg", color='#82ca9d', edgecolor='white', linewidth=1.5
1240
+ )
1241
+
1242
+ # Add data labels
1243
+ for i in range(len(x)):
1244
+ ax2.text(
1245
+ x[i] - bar_width / 2,
1246
+ avg_df['Arthur'][i] + 0.05,
1247
+ f'{avg_df["Arthur"][i]:.2f}',
1248
+ ha='center',
1249
+ va='bottom',
1250
+ fontweight='bold',
1251
+ fontsize=10,
1252
+ )
1253
+ ax2.text(
1254
+ x[i] + bar_width / 2,
1255
+ avg_df['Anna'][i] + 0.05,
1256
+ f'{avg_df["Anna"][i]:.2f}',
1257
+ ha='center',
1258
+ va='bottom',
1259
+ fontweight='bold',
1260
+ fontsize=10,
1261
+ )
1262
+
1263
+ # Customize plot
1264
+ ax2.set_xlabel('Category', fontsize=12, fontweight='bold')
1265
+ ax2.set_ylabel('Average Score', fontsize=12, fontweight='bold')
1266
+ ax2.set_title('Average Scores by Evaluator', fontsize=16, fontweight='bold')
1267
+ ax2.set_xticks(x)
1268
+ ax2.set_xticklabels(avg_df['Category'], fontsize=12)
1269
+ ax2.set_ylim(0, 2.2)
1270
+ ax2.grid(axis='y', linestyle='--', alpha=0.7)
1271
+ ax2.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=2, fontsize=12)
1272
+
1273
+ plt.tight_layout()
1274
+ display(plt.gcf())
1275
+ plt.close()
1276
+
1277
+ # Now show correlation analysis
1278
+ display(Markdown('## Correlation Analysis'))
1279
+
1280
+ # Create correlations chart
1281
+ fig, ax = plt.subplots(figsize=(10, 6))
1282
+
1283
+ metrics = ['Helpfulness', 'Adequate Length', 'Pass Rate']
1284
+ corr_values = [correlations['helpfulness'], correlations['adequate_length'], correlations['pass_rate']]
1285
+
1286
+ bars = ax.bar(metrics, corr_values)
1287
+
1288
+ # Colorize bars based on correlation (positive or negative)
1289
+ for i, bar in enumerate(bars):
1290
+ if corr_values[i] < 0:
1291
+ bar.set_color('#F44336') # red for negative correlation
1292
+ else:
1293
+ bar.set_color('#4CAF50') # green for positive correlation
1294
+
1295
+ # Add correlation values above/below bars
1296
+ for i, v in enumerate(corr_values):
1297
+ if v >= 0:
1298
+ ax.text(i, v + 0.05, f'{v:.2f}', ha='center', fontweight='bold')
1299
+ else:
1300
+ ax.text(i, v - 0.1, f'{v:.2f}', ha='center', fontweight='bold')
1301
+
1302
+ # Add reference line at y=0
1303
+ ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)
1304
+
1305
+ # Set y-axis limits to show the full range -1 to 1
1306
+ ax.set_ylim(-1.1, 1.1)
1307
+ ax.set_title('Evaluator Correlation Analysis', fontsize=14, fontweight='bold')
1308
+ ax.set_ylabel('Correlation Coefficient', fontsize=12)
1309
+ ax.text(
1310
+ 1,
1311
+ -0.9,
1312
+ 'Range: -1 to 1, where 1 is perfect positive correlation,\n-1 is perfect negative correlation, and 0 is no correlation',
1313
+ fontsize=8,
1314
+ ha='center',
1315
+ style='italic',
1316
+ )
1317
+
1318
+ plt.tight_layout()
1319
+ display(plt.gcf())
1320
+ plt.close()
1321
+
1322
+ # Display scatter plots
1323
+ display(Markdown('## Score Comparison Scatter Plots'))
1324
+
1325
+ # Create a 1x2 grid for helpfulness and adequate length scatter plots
1326
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 7))
1327
+
1328
+ # Helpfulness Scatter Plot
1329
+ helpfulness_data = [item for item in agreement_data if item['category'] == 'Helpfulness']
1330
+ x1 = [item['arthurValue'] for item in helpfulness_data]
1331
+ y1 = [item['annaValue'] for item in helpfulness_data]
1332
+ platforms1 = [item['platform'] for item in helpfulness_data]
1333
+
1334
+ scatter1 = ax1.scatter(x1, y1, c='#8884d8', s=100, alpha=0.7)
1335
+
1336
+ # Add platform labels
1337
+ for i, platform in enumerate(platforms1):
1338
+ ax1.annotate(platform, (x1[i], y1[i]), textcoords='offset points', xytext=(0, 10), ha='center')
1339
+
1340
+ # Add axis labels
1341
+ ax1.set_xlabel("Arthur's Rating", fontsize=12)
1342
+ ax1.set_ylabel("Anna's Rating", fontsize=12)
1343
+ ax1.set_title('Helpfulness Correlation', fontsize=14, fontweight='bold')
1344
+
1345
+ # Set axis limits
1346
+ ax1.set_xlim(0, 2)
1347
+ ax1.set_ylim(0, 2)
1348
+
1349
+ # Add perfect correlation line
1350
+ ax1.plot([0, 2], [0, 2], 'k--', alpha=0.3)
1351
+
1352
+ # Add correlation value text
1353
+ ax1.text(0.1, 1.8, f'Correlation: {correlations["helpfulness"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
1354
+ ax1.grid(True, linestyle='--', alpha=0.3)
1355
+
1356
+ # Adequate Length Scatter Plot
1357
+ adequate_length_data = [item for item in agreement_data if item['category'] == 'Adequate Length']
1358
+ x2 = [item['arthurValue'] for item in adequate_length_data]
1359
+ y2 = [item['annaValue'] for item in adequate_length_data]
1360
+ platforms2 = [item['platform'] for item in adequate_length_data]
1361
+
1362
+ scatter2 = ax2.scatter(x2, y2, c='#82ca9d', s=100, alpha=0.7)
1363
+
1364
+ # Add platform labels
1365
+ for i, platform in enumerate(platforms2):
1366
+ ax2.annotate(platform, (x2[i], y2[i]), textcoords='offset points', xytext=(0, 10), ha='center')
1367
+
1368
+ # Add axis labels
1369
+ ax2.set_xlabel("Arthur's Rating", fontsize=12)
1370
+ ax2.set_ylabel("Anna's Rating", fontsize=12)
1371
+ ax2.set_title('Adequate Length Correlation', fontsize=14, fontweight='bold')
1372
+
1373
+ # Set axis limits
1374
+ ax2.set_xlim(0, 2)
1375
+ ax2.set_ylim(0, 2)
1376
+
1377
+ # Add perfect correlation line
1378
+ ax2.plot([0, 2], [0, 2], 'k--', alpha=0.3)
1379
+
1380
+ # Add correlation value text
1381
+ ax2.text(0.1, 1.8, f'Correlation: {correlations["adequate_length"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
1382
+ ax2.grid(True, linestyle='--', alpha=0.3)
1383
+
1384
+ plt.tight_layout()
1385
+ display(plt.gcf())
1386
+ plt.close()
1387
+
1388
+ # Pass Rate Correlation Scatter Plot
1389
+ display(Markdown('## Pass Rate Comparison'))
1390
+
1391
+ plt.figure(figsize=(10, 6))
1392
+ x = [item['arthur'] for item in pass_fail_agreement]
1393
+ y = [item['anna'] for item in pass_fail_agreement]
1394
+ platforms = [item['platform'] for item in pass_fail_agreement]
1395
+ colors = ['#4CAF50' if item['agreement'] == 'Agree' else '#F44336' for item in pass_fail_agreement]
1396
+
1397
+ scatter = plt.scatter(x, y, c=colors, s=100, alpha=0.7)
1398
+
1399
+ # Add platform labels
1400
+ for i, platform in enumerate(platforms):
1401
+ plt.annotate(platform, (x[i], y[i]), textcoords='offset points', xytext=(0, 10), ha='center')
1402
+
1403
+ # Add axis labels
1404
+ plt.xlabel("Arthur's Pass Rate (%)", fontsize=12)
1405
+ plt.ylabel("Anna's Pass Rate (%)", fontsize=12)
1406
+ plt.title('Pass Rate Correlation', fontsize=14, fontweight='bold')
1407
+
1408
+ # Set axis limits
1409
+ plt.xlim(30, 105)
1410
+ plt.ylim(30, 105)
1411
+
1412
+ # Add perfect correlation line
1413
+ plt.plot([30, 105], [30, 105], 'k--', alpha=0.3)
1414
+
1415
+ # Add correlation value text
1416
+ plt.text(35, 95, f'Correlation: {correlations["pass_rate"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
1417
+
1418
+ # Add legend
1419
+ from matplotlib.lines import Line2D
1420
+
1421
+ legend_elements = [
1422
+ Line2D([0], [0], marker='o', color='w', markerfacecolor='#4CAF50', markersize=10, label='Agreement'),
1423
+ Line2D([0], [0], marker='o', color='w', markerfacecolor='#F44336', markersize=10, label='Disagreement'),
1424
+ ]
1425
+ plt.legend(handles=legend_elements, loc='upper left')
1426
+
1427
+ plt.grid(True, linestyle='--', alpha=0.3)
1428
+ plt.tight_layout()
1429
+ display(plt.gcf())
1430
+ plt.close()
1431
+
1432
+ # Platform-specific differences
1433
+ display(Markdown('## Platform-specific Evaluator Differences'))
1434
+
1435
+ # Calculate platform differences if not already done
1436
+ if 'display_df' not in globals():
1437
+ platform_differences = []
1438
+ for platform in set(item['platform'] for item in agreement_data):
1439
+ helpfulness = next(
1440
+ (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Helpfulness'), None
1441
+ )
1442
+ adequate_length = next(
1443
+ (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Adequate Length'), None
1444
+ )
1445
+ pass_fail = next((item for item in pass_fail_agreement if item['platform'] == platform), None)
1446
+
1447
+ if helpfulness and adequate_length and pass_fail:
1448
+ helpfulness_diff = helpfulness['arthurValue'] - helpfulness['annaValue']
1449
+ adequate_length_diff = adequate_length['arthurValue'] - adequate_length['annaValue']
1450
+ pass_rate_diff = pass_fail['arthur'] - pass_fail['anna']
1451
+
1452
+ platform_differences.append(
1453
+ {
1454
+ 'Platform': platform,
1455
+ 'Helpfulness Diff': helpfulness_diff,
1456
+ 'Adequate Length Diff': adequate_length_diff,
1457
+ 'Pass Rate Diff': pass_rate_diff,
1458
+ 'Agreement': pass_fail['agreement'],
1459
+ }
1460
+ )
1461
 
1462
+ platform_differences = []
1463
 
1464
+ for platform in set(item['platform'] for item in agreement_data):
1465
+ helpfulness = next((item for item in agreement_data if item['platform'] == platform and item['category'] == 'Helpfulness'), None)
1466
+ adequate_length = next(
1467
+ (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Adequate Length'), None
1468
+ )
1469
+ pass_fail = next((item for item in pass_fail_agreement if item['platform'] == platform), None)
1470
+
1471
+ if helpfulness and adequate_length and pass_fail:
1472
+ helpfulness_diff = helpfulness['arthurValue'] - helpfulness['annaValue']
1473
+ adequate_length_diff = adequate_length['arthurValue'] - adequate_length['annaValue']
1474
+ pass_rate_diff = pass_fail['arthur'] - pass_fail['anna']
1475
+ platform_differences.append(
1476
+ {
1477
+ 'Platform': platform,
1478
+ 'Helpfulness Diff': helpfulness_diff,
1479
+ 'Adequate Length Diff': adequate_length_diff,
1480
+ 'Pass Rate Diff': pass_rate_diff,
1481
+ 'Agreement': pass_fail['agreement'],
1482
+ }
1483
+ )
1484
 
1485
+ platform_diff_df = pd.DataFrame(platform_differences)
 
1486
 
1487
 
1488
+ _()
1489
+ return
1490
 
 
 
 
1491
 
1492
+ @app.cell
1493
+ def _(correlations, plt):
1494
+ # Creating Correlation Analysis Chart
1495
+ fig, ax = plt.subplots(figsize=(10, 6))
1496
+
1497
+ metrics = ['Helpfulness', 'Adequate Length', 'Pass Rate']
1498
+ corr_values = [correlations['helpfulness'], correlations['adequate_length'], correlations['pass_rate']]
1499
+
1500
+ bars = ax.bar(metrics, corr_values, color=['#8884d8', '#82ca9d', '#ff7300'])
1501
+
1502
+ # Colorize bars based on correlation (positive or negative)
1503
+ for i, bar in enumerate(bars):
1504
+ if corr_values[i] < 0:
1505
+ bar.set_color('#F44336') # red for negative correlation
1506
+ else:
1507
+ bar.set_color('#4CAF50') # green for positive correlation
1508
+
1509
+ # Add correlation values above/below bars
1510
+ for i, v in enumerate(corr_values):
1511
+ if v >= 0:
1512
+ ax.text(i, v + 0.05, f'{v:.2f}', ha='center', fontweight='bold')
1513
+ else:
1514
+ ax.text(i, v - 0.1, f'{v:.2f}', ha='center', fontweight='bold')
1515
+
1516
+ # Add reference line at y=0
1517
+ ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)
1518
+
1519
+ # Set y-axis limits to show the full range -1 to 1
1520
+ ax.set_ylim(-1.1, 1.1)
1521
+
1522
+ # Add labels and title
1523
+ ax.set_title('Evaluator Correlation Analysis', fontsize=14, fontweight='bold')
1524
+ ax.set_ylabel('Correlation Coefficient', fontsize=12)
1525
+ ax.text(
1526
+ 1,
1527
+ -0.9,
1528
+ 'Range: -1 to 1, where 1 is perfect positive correlation,\n-1 is perfect negative correlation, and 0 is no correlation',
1529
+ fontsize=8,
1530
+ ha='center',
1531
+ style='italic',
1532
+ )
1533
 
1534
+ plt.tight_layout()
1535
+ return ax, bar, bars, corr_values, fig, i, metrics, v
1536
 
 
 
 
1537
 
1538
+ @app.cell
1539
+ def _(agreement_data, correlations, plt):
1540
+ def _():
1541
+ # Create Helpfulness Correlation Scatter Plot
1542
+ helpfulness_data = [item for item in agreement_data if item['category'] == 'Helpfulness']
1543
 
1544
+ fig, ax = plt.subplots(figsize=(8, 6))
1545
+ x = [item['arthurValue'] for item in helpfulness_data]
1546
+ y = [item['annaValue'] for item in helpfulness_data]
1547
+ platforms = [item['platform'] for item in helpfulness_data]
1548
 
1549
+ scatter = ax.scatter(x, y, c='#8884d8', s=100, alpha=0.7)
1550
 
1551
+ # Add platform labels
1552
+ for i, platform in enumerate(platforms):
1553
+ ax.annotate(platform, (x[i], y[i]), textcoords='offset points', xytext=(0, 10), ha='center')
1554
 
1555
+ # Add axis labels
1556
+ ax.set_xlabel("Arthur's Rating", fontsize=12)
1557
+ ax.set_ylabel("Anna's Rating", fontsize=12)
1558
+ ax.set_title('Helpfulness Correlation', fontsize=14, fontweight='bold')
 
1559
 
1560
+ # Set axis limits
1561
+ ax.set_xlim(0, 2)
1562
+ ax.set_ylim(0, 2)
1563
 
1564
+ # Add perfect correlation line
1565
+ ax.plot([0, 2], [0, 2], 'k--', alpha=0.3)
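+ # Points above the dashed y = x line are platforms Anna rated higher; points below, Arthur.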
 
 
 
1566
 
1567
+ # Add correlation value text
1568
+ ax.text(0.1, 1.8, f'Correlation: {correlations["helpfulness"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
1569
+
1570
+ plt.grid(True, linestyle='--', alpha=0.3)
1571
+ plt.tight_layout()
+ return plt.gca()
1572
+
1573
+
1574
+ _()
1575
  return
1576
 
1577
 
1578
  @app.cell
1579
+ def _(agreement_data, pass_fail_agreement, pd):
1580
+ def _():
1581
+ # Create a DataFrame to show platform-specific differences
1582
+ platform_differences = []
1583
+
1584
+ for platform in set(item['platform'] for item in agreement_data):
1585
+ helpfulness = next((item for item in agreement_data if item['platform'] == platform and item['category'] == 'Helpfulness'), None)
1586
+ adequate_length = next(
1587
+ (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Adequate Length'), None
1588
+ )
1589
+ pass_fail = next((item for item in pass_fail_agreement if item['platform'] == platform), None)
1590
+
1591
+ if helpfulness and adequate_length and pass_fail:
1592
+ helpfulness_diff = helpfulness['arthurValue'] - helpfulness['annaValue']
1593
+ adequate_length_diff = adequate_length['arthurValue'] - adequate_length['annaValue']
1594
+ pass_rate_diff = pass_fail['arthur'] - pass_fail['anna']
1595
+ platform_differences.append(
1596
+ {
1597
+ 'Platform': platform,
1598
+ 'Helpfulness Diff': helpfulness_diff,
1599
+ 'Adequate Length Diff': adequate_length_diff,
1600
+ 'Pass Rate Diff': pass_rate_diff,
1601
+ 'Agreement': pass_fail['agreement'],
1602
+ }
1603
+ )
1604
+
1605
+ platform_diff_df = pd.DataFrame(platform_differences)
1606
+
1607
+ # Display platform differences
1608
+ platform_diff_df['Helpfulness Diff'] = platform_diff_df['Helpfulness Diff'].round(1)
1609
+ platform_diff_df['Adequate Length Diff'] = platform_diff_df['Adequate Length Diff'].round(1)
1610
+ platform_diff_df['Pass Rate Diff'] = platform_diff_df['Pass Rate Diff'].astype(int)
1611
 
1612
+ def style_diff(val):
1613
+ if val > 0:
1614
+ return f'Arthur +{abs(val)}'
1615
+ elif val < 0:
1616
+ return f'Anna +{abs(val)}'
1617
+ else:
1618
+ return 'Equal'
1619
 
1620
+ # Apply styling and display the data
1621
+ styled_platform_diff = platform_diff_df.copy()
1622
+ styled_platform_diff['Helpfulness'] = styled_platform_diff['Helpfulness Diff'].apply(style_diff)
1623
+ styled_platform_diff['Adequate Length'] = styled_platform_diff['Adequate Length Diff'].apply(style_diff)
1624
+ styled_platform_diff['Pass Rate'] = styled_platform_diff['Pass Rate Diff'].apply(style_diff)
1625
+
1626
+ display_cols = ['Platform', 'Helpfulness', 'Adequate Length', 'Pass Rate', 'Agreement']
1627
+ display_df = styled_platform_diff[display_cols]
+ return display_df
1628
+
1629
+
1630
+ _()
1631
  return
1632
 
1633
 
1634
+ @app.cell
1635
+ def _(calculate_average_metrics, np, plt):
1636
+ def plot_average_scores():
1637
+ """Plot the average scores for each category by evaluator"""
1638
+ # Get average data
1639
+ avg_df = calculate_average_metrics()
1640
+
1641
+ # Set up plot
1642
+ plt.figure(figsize=(10, 6))
1643
+
1644
+ # Set width of bars
1645
+ bar_width = 0.35
1646
+ x = np.arange(len(avg_df))
1647
+
1648
+ # Create bars
1649
+ plt.bar(
1650
+ x - bar_width / 2,
1651
+ avg_df['Arthur'],
1652
+ width=bar_width,
1653
+ label="Arthur's Avg. Score",
1654
+ color='#8884d8',
1655
+ alpha=0.8,
1656
+ edgecolor='white',
1657
+ linewidth=1.5,
1658
+ )
1659
+ plt.bar(
1660
+ x + bar_width / 2,
1661
+ avg_df['Anna'],
1662
+ width=bar_width,
1663
+ label="Anna's Avg. Score",
1664
+ color='#82ca9d',
1665
+ alpha=0.8,
1666
+ edgecolor='white',
1667
+ linewidth=1.5,
1668
+ )
1669
+
1670
+ # Add data labels
1671
+ for i in range(len(x)):
1672
+ plt.text(
1673
+ x[i] - bar_width / 2,
1674
+ avg_df['Arthur'][i] + 0.05,
1675
+ f'{avg_df["Arthur"][i]:.2f}',
1676
+ ha='center',
1677
+ va='bottom',
1678
+ color='#333',
1679
+ fontweight='bold',
1680
+ )
1681
+ plt.text(
1682
+ x[i] + bar_width / 2,
1683
+ avg_df['Anna'][i] + 0.05,
1684
+ f'{avg_df["Anna"][i]:.2f}',
1685
+ ha='center',
1686
+ va='bottom',
1687
+ color='#333',
1688
+ fontweight='bold',
1689
+ )
1690
+
1691
+ # Customize plot
1692
+ plt.xlabel('Evaluation Category', fontsize=12, fontweight='bold')
1693
+ plt.ylabel('Average Score (0-2 scale)', fontsize=12, fontweight='bold')
1694
+ plt.title('Average Scores by Evaluator', fontsize=14, fontweight='bold')
1695
+ plt.xticks(x, avg_df['Category'], fontsize=11)
1696
+ plt.ylim(0, 2.2) # Set reasonable y-axis limit
1697
+ plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2)
1698
+ plt.grid(axis='y', linestyle='--', alpha=0.7)
1699
+
1700
+ # Add a border to the plot
1701
+ ax = plt.gca()
1702
+ for spine in ax.spines.values():
1703
+ spine.set_edgecolor('#dddddd')
1704
+ spine.set_linewidth(1.5)
1705
+
1706
+ plt.tight_layout()
1707
+
1708
+ return plt.gca()
1709
+ return (plot_average_scores,)
1710
+
1711
+
1712
+ @app.cell
1713
+ def _(agreement_data, pass_fail_agreement, pd):
1714
+ def calculate_average_metrics():
1715
+ """Calculate average metrics for each evaluator and category"""
1716
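+ # Averages are unweighted means over the six platforms, on the same 0-2 rating scale used above.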
+ # Process helpfulness data
1717
+ helpfulness_data = [item for item in agreement_data if item['category'] == 'Helpfulness']
1718
+ arthur_helpfulness = sum(item['arthurValue'] for item in helpfulness_data) / len(helpfulness_data)
1719
+ anna_helpfulness = sum(item['annaValue'] for item in helpfulness_data) / len(helpfulness_data)
1720
+
1721
+ # Process adequate length data
1722
+ adequate_length_data = [item for item in agreement_data if item['category'] == 'Adequate Length']
1723
+ arthur_adequate = sum(item['arthurValue'] for item in adequate_length_data) / len(adequate_length_data)
1724
+ anna_adequate = sum(item['annaValue'] for item in adequate_length_data) / len(adequate_length_data)
1725
+
1726
+ # Create DataFrame with results
1727
+ avg_df = pd.DataFrame(
1728
+ {
1729
+ 'Category': ['Helpfulness', 'Adequate Length'],
1730
+ 'Arthur': [arthur_helpfulness, arthur_adequate],
1731
+ 'Anna': [anna_helpfulness, anna_adequate],
1732
+ }
1733
+ )
1734
+
1735
+ return avg_df
1736
+
1737
+
1738
+ # Count agreement vs disagreement
1739
+ agree_count = sum(1 for item in pass_fail_agreement if item['agreement'] == 'Agree')
1740
+ disagree_count = sum(1 for item in pass_fail_agreement if item['agreement'] == 'Disagree')
1741
+ return agree_count, calculate_average_metrics, disagree_count
1742
+
1743
+
1744
+ @app.cell
1745
+ def _(plot_average_scores):
1746
+ plot_average_scores()
1747
  return
1748
 
1749
 
1750
+ @app.cell
1751
+ def _(agree_count, ax, calculate_average_metrics, disagree_count, np, plt):
1752
+ def interactive_evaluator_dashboard():
1753
+ """Display an interactive dashboard for evaluator analysis"""
1754
+ from IPython.display import display, Markdown, HTML
1755
+
1756
+ # Display header
1757
+ display(
1758
+ HTML("""
1759
+ <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; text-align: center; margin-bottom: 20px;">
1760
+ <h1 style="color: #333; margin-bottom: 10px;">Evaluator Comparison Analysis</h1>
1761
+ <p style="font-style: italic; color: #666;">Analyzing differences between Arthur's and Anna's evaluations</p>
1762
+ </div>
1763
+ """)
1764
+ )
1765
+
1766
+ # Display Agreement Section
1767
+ display(Markdown('## Agreement Overview'))
1768
+
1769
+ # Create side-by-side visualizations
1770
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 7))
1771
+
1772
+ # Agreement Pie Chart
1773
+ labels = ['Agreement', 'Disagreement']
1774
+ sizes = [agree_count, disagree_count]
1775
+ colors = ['#4CAF50', '#F44336']
1776
+ explode = (0.1, 0)
1777
+
1778
+ ax1.pie(
1779
+ sizes,
1780
+ explode=explode,
1781
+ labels=labels,
1782
+ colors=colors,
1783
+ autopct='%1.1f%%',
1784
+ shadow=True,
1785
+ startangle=140,
1786
+ textprops={'fontsize': 12, 'fontweight': 'bold'},
1787
+ )
1788
+ ax1.set_title('Evaluator Pass/Fail Agreement', fontsize=16, fontweight='bold')
1789
+
1790
+ # Average Scores Bar Chart
1791
+ avg_df = calculate_average_metrics()
1792
+
1793
+ # Set width of bars
1794
+ bar_width = 0.35
1795
+ x = np.arange(len(avg_df))
1796
+
1797
+ # Create bars
1798
+ ax2.bar(x - bar_width / 2, avg_df['Arthur'], width=bar_width, label="Arthur's Avg", color='#8884d8', edgecolor='white', linewidth=1.5)
1799
+ ax2.bar(x + bar_width / 2, avg_df['Anna'], width=bar_width, label="Anna's Avg", color='#82ca9d', edgecolor='white', linewidth=1.5)
1800
+
1801
+ # Add data labels
1802
+ for i in range(len(x)):
1803
+ ax2.text(
1804
+ x[i] - bar_width / 2,
1805
+ avg_df['Arthur'][i] + 0.05,
1806
+ f'{avg_df["Arthur"][i]:.2f}',
1807
+ ha='center',
1808
+ va='bottom',
1809
+ fontweight='bold',
1810
+ fontsize=10,
1811
+ )
1812
+ ax2.text(
1813
+ x[i] + bar_width / 2,
1814
+ avg_df['Anna'][i] + 0.05,
1815
+ f'{avg_df["Anna"][i]:.2f}',
1816
+ ha='center',
1817
+ va='bottom',
1818
+ fontweight='bold',
1819
+ fontsize=10,
1820
+ )
1821
+
1822
+ # Customize plot
1823
+ ax2.set_xlabel('Category', fontsize=12, fontweight='bold')
1824
+ ax2.set_ylabel('Average Score', fontsize=12, fontweight='bold')
1825
+ ax2.set_xticks(x)
+ ax2.set_xticklabels(avg_df['Category'], fontsize=12)
+ # Return the figure so the calling cell can render it
+ plt.tight_layout()
+ return fig
1826
+ return (interactive_evaluator_dashboard,)
1827
+
1828
+
1829
+ @app.cell
1830
+ def _(interactive_evaluator_dashboard):
1831
+ interactive_evaluator_dashboard()
1832
+ return
 
 
1833
 
1834
 
1835
  if __name__ == "__main__":