John Graham Reynolds committed · Commit 430f772 · 1 Parent(s): 346e008

try example again and add additional documentation
app.py CHANGED
@@ -7,6 +7,23 @@ from evaluate.utils import infer_gradio_input_types, json_to_string_type, parse_
 from fixed_f1 import FixedF1
 from pathlib import Path
 
+added_description = """
+See the HF Space showing off how to combine various metrics here:
+[MarioBarbeque/CombinedEvaluationMetrics](https://huggingface.co/spaces/MarioBarbeque/CombinedEvaluationMetrics)
+
+In the specific use case of the `FixedF1` metric, one writes the following:\n
+
+```python
+f1 = FixedF1(average=...)
+
+f1.add_batch(predictions=..., references=...)
+f1.compute()
+```\n
+
+where the `average` parameter can be different at instantiation time for each of the metrics. Acceptable values include `[None, 'micro', 'macro', 'weighted']` (
+or `binary` if there exist only two labels). \n
+"""
+
 metric = FixedF1()
 
 if isinstance(metric.features, list):
@@ -30,7 +47,7 @@ def compute(input_df: pd.DataFrame, method: str):
     metric.add_batch(predictions=predicted, references=references)
     outputs = metric.compute()
 
-    return f"
+    return f"The F1 score for these predictions is: \n {outputs}"
 
 space = gr.Interface(
     fn=compute,
@@ -48,11 +65,14 @@ space = gr.Interface(
         )
     ],
     outputs=gr.Textbox(label=metric.name),
-    description=metric.info.description,
+    description=metric.info.description + added_description,
     title=f"Metric: {metric.name}",
     article=parse_readme(local_path / "README.md"),
     examples=[
-
+        [
+            pd.DataFrame(parse_test_cases(test_cases, feature_names, gradio_input_types)[0]),
+            "weighted"
+        ],
     ],
     cache_examples=False
 )
|