Aye10032 commited on
Commit
bcb7c5c
·
1 Parent(s): 7d774d8
Files changed (2) hide show
  1. loss_metric.py +14 -22
  2. requirements.txt +2 -1
loss_metric.py CHANGED
@@ -11,28 +11,25 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- """TODO: Add a description here."""
15
 
16
  import evaluate
17
  import datasets
18
 
 
19
 
20
- # TODO: Add BibTeX citation
21
  _CITATION = """\
22
  @InProceedings{huggingface:module,
23
- title = {A great new module},
24
- authors={huggingface, Inc.},
25
- year={2020}
26
  }
27
  """
28
 
29
- # TODO: Add description of the module here
30
  _DESCRIPTION = """\
31
- This new module is designed to solve this great ML task and is crafted with a lot of care.
32
  """
33
 
34
-
35
- # TODO: Add description of the arguments of the module here
36
  _KWARGS_DESCRIPTION = """
37
  Calculates how good are predictions given some references, using certain scores
38
  Args:
@@ -41,25 +38,21 @@ Args:
41
  references: list of reference for each prediction. Each
42
  reference should be a string with tokens separated by spaces.
43
  Returns:
44
- accuracy: description of the first score,
45
- another_score: description of the second score,
46
  Examples:
47
  Examples should be written in doctest format, and should illustrate how
48
  to use the function.
49
 
50
- >>> my_new_module = evaluate.load("my_new_module")
51
  >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
52
  >>> print(results)
53
- {'accuracy': 1.0}
54
  """
55
 
56
- # TODO: Define external resources urls if needed
57
- BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
58
-
59
 
60
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
61
  class loss_metric(evaluate.Metric):
62
- """TODO: Short description of my evaluation module."""
63
 
64
  def _info(self):
65
  # TODO: Specifies the evaluate.EvaluationModuleInfo object
@@ -83,13 +76,12 @@ class loss_metric(evaluate.Metric):
83
 
84
  def _download_and_prepare(self, dl_manager):
85
  """Optional: download external resources useful to compute the scores"""
86
- # TODO: Download external resources if needed
87
  pass
88
 
89
  def _compute(self, predictions, references):
90
  """Returns the scores"""
91
- # TODO: Compute the different scores of the module
92
- accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
93
  return {
94
- "accuracy": accuracy,
95
- }
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
+ """Calculation of the cross-entropy loss function using the huggingface evaluate module."""
15
 
16
import datasets
import evaluate
import torch
from torch import nn
20
 
 
21
  _CITATION = """\
22
  @InProceedings{huggingface:module,
23
+ title = {Loss Metric},
24
+ authors={YU YE},
25
+ year={2024}
26
  }
27
  """
28
 
 
29
  _DESCRIPTION = """\
30
+ Calculation of the cross-entropy loss function using the huggingface evaluate module.
31
  """
32
 
 
 
33
  _KWARGS_DESCRIPTION = """
34
  Calculates the cross-entropy loss between model predictions and reference labels.
35
  Args:
 
38
  references: list of reference for each prediction. Each
39
  reference should be a string with tokens separated by spaces.
40
  Returns:
41
+ loss: the mean cross-entropy loss computed over all (prediction, reference) pairs,
 
42
  Examples:
43
  Examples should be written in doctest format, and should illustrate how
44
  to use the function.
45
 
46
+ >>> my_new_module = evaluate.load("Aye10032/loss_metric")
47
  >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
48
  >>> print(results)
49
+ {'loss': 1.0}
50
  """
51
 
 
 
 
52
 
53
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
54
  class loss_metric(evaluate.Metric):
55
+ """Calculation of the cross-entropy loss function using the huggingface evaluate module."""
56
 
57
  def _info(self):
58
  # TODO: Specifies the evaluate.EvaluationModuleInfo object
 
76
 
77
  def _download_and_prepare(self, dl_manager):
78
  """Optional: download external resources useful to compute the scores"""
 
79
  pass
80
 
81
  def _compute(self, predictions, references):
82
  """Returns the scores"""
83
+ loss_func = nn.CrossEntropyLoss()
84
+ loss = loss_func(predictions, references)
85
  return {
86
+ "loss": loss,
87
+ }
requirements.txt CHANGED
@@ -1 +1,2 @@
1
- git+https://github.com/huggingface/evaluate@main
 
 
1
+ git+https://github.com/huggingface/evaluate@main
2
+ torch