Add some code instructions
sescore.py  +19 -22  CHANGED
@@ -55,38 +55,35 @@ class robertaEncoder(BERTEncoder):
 
 # TODO: Add BibTeX citation
 _CITATION = """\
-@InProceedings{huggingface:module,
-title = {A great new module},
-authors={huggingface, Inc.},
-year={2020}
+@inproceedings{xu-etal-2022-not,
+title={Not All Errors are Equal: Learning Text Generation Metrics using Stratified Error Synthesis},
+author={Xu, Wenda and Tuan, Yi-lin and Lu, Yujie and Saxon, Michael and Li, Lei and Wang, William Yang},
+booktitle ={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
+month={dec},
+year={2022},
+url={https://arxiv.org/abs/2210.05035}
 }
 """
 
-# TODO: Add description of the module here
 _DESCRIPTION = """\
 SEScore is an evaluation metric that trys to compute an overall score to measure text generation quality.
 """
 
-
-# TODO: Add description of the arguments of the module here
 _KWARGS_DESCRIPTION = """
-Calculates how good are predictions given some references, using certain scores
+Calculates how good are predictions given some references
 Args:
-    predictions: list of predictions to score. Each predictions
-        should be a string with tokens separated by spaces.
-    references: list of reference for each prediction. Each
-        reference should be a string with tokens separated by spaces.
+    predictions: list of candidate outputs
+    references: list of references
 Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
+    {"mean_score": mean_score, "scores": scores}
+
 Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-
-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    >>> print(results)
-    {'accuracy': 1.0}
+    >>> import evaluate
+    >>> sescore = evaluate.load("xu1998hz/sescore")
+    >>> score = sescore.compute(
+        references=['sescore is a simple but effective next-generation text evaluation metric'],
+        predictions=['sescore is simple effective text evaluation metric for next generation']
+    )
 """
 
 # TODO: Define external resources urls if needed
@@ -95,7 +92,7 @@ BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class SEScore(evaluate.Metric):
-    """
+    """SEScore"""
 
     def _info(self):
         # TODO: Specifies the evaluate.EvaluationModuleInfo object
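For context, a minimal usage sketch of the API documented by the added docstring. It assumes the metric loads from the xu1998hz/sescore Space and that compute() returns the {"mean_score": ..., "scores": ...} dictionary named in the Returns section of this diff; those keys and the print calls below are inferred from the diff, not from the module's implementation.

import evaluate

# Usage sketch based on the docstring added in this diff.
# Assumption: compute() returns {"mean_score": ..., "scores": ...}
# as described in the Returns section above.
sescore = evaluate.load("xu1998hz/sescore")
results = sescore.compute(
    references=["sescore is a simple but effective next-generation text evaluation metric"],
    predictions=["sescore is simple effective text evaluation metric for next generation"],
)
print(results["mean_score"])  # overall score for the batch (key assumed from the diff)
print(results["scores"])      # per-example scores (key assumed from the diff)

The diff stops at the _info TODO. As a rough sketch of how that method is usually completed for a string-in, score-out evaluate.Metric; the concrete features and any homepage or codebase_urls in the real sescore.py may differ.

import datasets
import evaluate

# Stand-ins for the _DESCRIPTION, _CITATION and _KWARGS_DESCRIPTION constants
# defined earlier in sescore.py (see the diff above).
_DESCRIPTION = "SEScore is an evaluation metric that computes an overall score to measure text generation quality."
_CITATION = "@inproceedings{xu-etal-2022-not, ...}"
_KWARGS_DESCRIPTION = "predictions: list of candidate outputs; references: list of references"


class SEScore(evaluate.Metric):
    """SEScore"""

    def _info(self):
        # Sketch only: typical EvaluationModuleInfo wiring for a metric that
        # scores string predictions against string references.
        return evaluate.MetricInfo(
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
        )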