John Graham Reynolds
committed on
Commit
·
288a6ca
1
Parent(s):
407fe90
update inputs keyword and type hints
Browse files
app.py
CHANGED
@@ -3,6 +3,7 @@ from fixed_precision import FixedPrecision
|
|
3 |
from fixed_recall import FixedRecall
|
4 |
import evaluate
|
5 |
import gradio as gr
|
|
|
6 |
|
7 |
title = "'Combine' multiple metrics with this 🤗 Evaluate 🪲 Fix!"
|
8 |
|
@@ -22,27 +23,27 @@ Try to use \t to write some code? \t or how does that work? </p>
|
|
22 |
article = "<p style='text-align: center'> Check out the [original repo](https://github.com/johngrahamreynolds/FixedMetricsForHF) housing this code, and a quickly \
|
23 |
trained [multilabel text classification model](https://github.com/johngrahamreynolds/RoBERTa-base-DReiFT/tree/main) that makes use of it during evaluation.</p>"
|
24 |
|
25 |
-
def show_off(predictions
|
26 |
|
27 |
-
f1 = FixedF1(average=weighting_map["f1"])
|
28 |
-
precision = FixedPrecision(average=weighting_map["precision"])
|
29 |
-
recall = FixedRecall(average=weighting_map["recall"])
|
30 |
|
31 |
-
combined = evaluate.combine([f1, recall, precision])
|
32 |
-
|
33 |
-
combined.add_batch(prediction=predictions, reference=references)
|
34 |
-
outputs = combined.compute()
|
35 |
|
|
|
|
|
|
|
36 |
|
37 |
return "Your metrics are as follows: \n" + outputs
|
38 |
|
39 |
|
40 |
gr.Interface(
|
41 |
fn=show_off,
|
42 |
-
inputs="
|
43 |
outputs="text",
|
44 |
title=title,
|
45 |
description=description,
|
46 |
article=article,
|
47 |
-
examples=[[
|
48 |
).launch()
|
|
|
3 |
from fixed_recall import FixedRecall
|
4 |
import evaluate
|
5 |
import gradio as gr
|
6 |
+
import pandas as pd
|
7 |
|
8 |
# Page copy for the Gradio demo: headline and footer HTML/Markdown shown
# around the interface. `article` uses a backslash continuation so the
# rendered string stays a single paragraph.
title = "'Combine' multiple metrics with this 🤗 Evaluate 🪲 Fix!"

article = "<p style='text-align: center'> Check out the [original repo](https://github.com/johngrahamreynolds/FixedMetricsForHF) housing this code, and a quickly \
trained [multilabel text classification model](https://github.com/johngrahamreynolds/RoBERTa-base-DReiFT/tree/main) that makes use of it during evaluation.</p>"
|
25 |
|
26 |
+
def show_off(predictions: list[list]) -> str:
    """Echo the received predictions back as a formatted message.

    The combined-metric computation is currently commented out (it depends
    on a `weighting_map` / `references` not wired up here yet), so this
    stub simply reports the raw predictions it was given.

    Args:
        predictions: prediction values collected from the Gradio input.

    Returns:
        A human-readable string containing the (placeholder) outputs.
    """
    # f1 = FixedF1(average=weighting_map["f1"])
    # precision = FixedPrecision(average=weighting_map["precision"])
    # recall = FixedRecall(average=weighting_map["recall"])

    # combined = evaluate.combine([f1, recall, precision])

    # combined.add_batch(prediction=predictions, reference=references)
    # outputs = combined.compute()
    outputs = predictions

    # BUG FIX: `outputs` is a list here, and `str + list` raises TypeError.
    # Convert explicitly before concatenating.
    return "Your metrics are as follows: \n" + str(outputs)
|
39 |
|
40 |
|
41 |
# Wire up the demo: a dataset-style input feeds `show_off`, whose string
# result is rendered as text. `title`, `description`, and `article` supply
# the page copy; one example row pre-populates the input.
demo = gr.Interface(
    fn=show_off,
    inputs="dataset",
    outputs="text",
    title=title,
    description=description,
    article=article,
    examples=[[1, 0, 2, 0, 1]],
)
demo.launch()
|