yzha committed
Commit b0eb96e · 1 Parent(s): c7a0416
Files changed (1)
  1. ctc_eval.py +17 -20
ctc_eval.py CHANGED
@@ -19,16 +19,19 @@ import datasets
 
 # TODO: Add BibTeX citation
 _CITATION = """\
-@InProceedings{huggingface:module,
-title = {A great new module},
-authors={huggingface, Inc.},
-year={2020}
+@inproceedings{deng2021compression,
+title={Compression, Transduction, and Creation: A Unified Framework for Evaluating Natural Language Generation},
+author={Deng, Mingkai and Tan, Bowen and Liu, Zhengzhong and Xing, Eric and Hu, Zhiting},
+booktitle={Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
+pages={7580--7605},
+year={2021}
 }
 """
 
 # TODO: Add description of the module here
 _DESCRIPTION = """\
-This new module is designed to solve this great ML task and is crafted with a lot of care.
+This repo contains code of an automatic evaluation metric described in the paper
+Compression, Transduction, and Creation: A Unified Framework for Evaluating Natural Language Generation
 """
 
 
@@ -36,21 +39,15 @@ This new module is designed to solve this great ML task and is crafted with a lo
 _KWARGS_DESCRIPTION = """
 Calculates how good are predictions given some references, using certain scores
 Args:
-    predictions: list of predictions to score. Each predictions
-        should be a string with tokens separated by spaces.
-    references: list of reference for each prediction. Each
-        reference should be a string with tokens separated by spaces.
+    predictions: List of texts (Hypothesis) to score. The list now only supports one piece of text
+    references: List of texts (Premise) to score. The list now only supports one piece of text
 Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
+    ctc_score: The CTC score
 Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-
-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
+    >>> ctc_score = evaluate.load("yzha/ctc_eval")
+    >>> results = ctc_score.compute(references=['hello world'], predictions='hi world')
     >>> print(results)
-    {'accuracy': 1.0}
+    {'ctc_score': 0.5211202502250671}
 """
 
 # TODO: Define external resources urls if needed
@@ -75,10 +72,10 @@ class CTC_Eval(evaluate.EvaluationModule):
                 'references': datasets.Value('large_string'),
             }),
             # Homepage of the module for documentation
-            homepage="http://module.homepage",
+            homepage="https://github.com/tanyuqian/ctc-gen-eval",
             # Additional links to the codebase or references
-            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-            reference_urls=["http://path.to.reference.url/new_module"]
+            codebase_urls=["https://github.com/tanyuqian/ctc-gen-eval"],
+            reference_urls=["https://github.com/tanyuqian/ctc-gen-eval"]
         )
 
     def _download_and_prepare(self, dl_manager):
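For orientation, here is a minimal usage sketch of the module as documented by this commit. The module id yzha/ctc_eval and the ctc_score result key are taken from the new docstring; everything else follows the standard evaluate library API. Note that the committed doctest passes predictions as a bare string ('hi world') even though the updated Args text describes both inputs as lists; the sketch below follows the documented list form.

# Minimal usage sketch, assuming the `evaluate` library is installed and the
# module's backing code (ctc-gen-eval) is available at load time.
import evaluate

# Module id taken from the doctest added in this commit.
ctc_score = evaluate.load("yzha/ctc_eval")

# The updated Args describe both inputs as single-element lists; the
# committed doctest passes `predictions` as a bare string instead.
results = ctc_score.compute(
    predictions=["hi world"],    # hypothesis text(s)
    references=["hello world"],  # premise text(s)
)
print(results)  # per the doctest: {'ctc_score': 0.5211202502250671}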