mtzig commited on
Commit
0229d4e
·
1 Parent(s): c0c786c

implements model

Browse files
Files changed (2) hide show
  1. README.md +2 -33
  2. cross_entropy_loss.py +20 -44
README.md CHANGED
@@ -1,11 +1,10 @@
1
  ---
2
  title: cross_entropy_loss
3
- datasets:
4
  -
5
  tags:
6
  - evaluate
7
  - metric
8
- description: "TODO: add a description here"
9
  sdk: gradio
10
  sdk_version: 3.19.1
11
  app_file: app.py
@@ -14,37 +13,7 @@ pinned: false
14
 
15
  # Metric Card for cross_entropy_loss
16
 
17
- ***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
18
 
19
  ## Metric Description
20
- *Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
21
 
22
- ## How to Use
23
- *Give general statement of how to use the metric*
24
-
25
- *Provide simplest possible example for using the metric*
26
-
27
- ### Inputs
28
- *List all input arguments in the format below*
29
- - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
30
-
31
- ### Output Values
32
-
33
- *Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu" : 6.02}*
34
-
35
- *State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
36
-
37
- #### Values from Popular Papers
38
- *Give examples, preferably with links to leaderboards or publications, to papers that have reported this metric, along with the values they have reported.*
39
-
40
- ### Examples
41
- *Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
42
-
43
- ## Limitations and Bias
44
- *Note any known limitations or biases that the metric has, with links and references if possible.*
45
-
46
- ## Citation
47
- *Cite the source where this metric was introduced.*
48
-
49
- ## Further References
50
- *Add any useful further references.*
 
1
  ---
2
  title: cross_entropy_loss
 
3
  -
4
  tags:
5
  - evaluate
6
  - metric
7
+ description: "computes the cross entropy loss"
8
  sdk: gradio
9
  sdk_version: 3.19.1
10
  app_file: app.py
 
13
 
14
  # Metric Card for cross_entropy_loss
15
 
 
16
 
17
  ## Metric Description
 
18
 
19
+ A simple metric that calculates cross-entropy loss. Created so that I can log losses from different training tasks within a Hugging Face trainer for multi-task training.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cross_entropy_loss.py CHANGED
@@ -11,50 +11,30 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- """TODO: Add a description here."""
15
 
16
  import evaluate
17
  import datasets
18
-
 
19
 
20
  # TODO: Add BibTeX citation
21
- _CITATION = """\
22
- @InProceedings{huggingface:module,
23
- title = {A great new module},
24
- authors={huggingface, Inc.},
25
- year={2020}
26
- }
27
  """
28
 
29
  # TODO: Add description of the module here
30
  _DESCRIPTION = """\
31
- This new module is designed to solve this great ML task and is crafted with a lot of care.
32
  """
33
 
34
 
35
  # TODO: Add description of the arguments of the module here
36
  _KWARGS_DESCRIPTION = """
37
- Calculates how good predictions are, given some references, using certain scores
38
- Args:
39
- predictions: list of predictions to score. Each predictions
40
- should be a string with tokens separated by spaces.
41
- references: list of reference for each prediction. Each
42
- reference should be a string with tokens separated by spaces.
43
- Returns:
44
- accuracy: description of the first score,
45
- another_score: description of the second score,
46
- Examples:
47
- Examples should be written in doctest format, and should illustrate how
48
- to use the function.
49
-
50
- >>> my_new_module = evaluate.load("my_new_module")
51
- >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
52
- >>> print(results)
53
- {'accuracy': 1.0}
54
  """
55
 
56
  # TODO: Define external resources urls if needed
57
- BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
58
 
59
 
60
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
@@ -70,26 +50,22 @@ class cross_entropy_loss(evaluate.Metric):
70
  citation=_CITATION,
71
  inputs_description=_KWARGS_DESCRIPTION,
72
  # This defines the format of each prediction and reference
73
- features=datasets.Features({
74
- 'predictions': datasets.Value('int64'),
75
- 'references': datasets.Value('int64'),
76
- }),
77
- # Homepage of the module for documentation
78
- homepage="http://module.homepage",
79
- # Additional links to the codebase or references
80
- codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
81
- reference_urls=["http://path.to.reference.url/new_module"]
82
  )
83
 
84
- def _download_and_prepare(self, dl_manager):
85
- """Optional: download external resources useful to compute the scores"""
86
- # TODO: Download external resources if needed
87
- pass
88
 
89
- def _compute(self, predictions, references):
90
  """Returns the scores"""
91
- # TODO: Compute the different scores of the module
92
- accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
 
 
93
  return {
94
- "accuracy": accuracy,
95
  }
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
+ """Cross Entropy Loss Metric"""
15
 
16
  import evaluate
17
  import datasets
18
+ import torch.nn.functional as F
19
+ import torch
20
 
21
  # TODO: Add BibTeX citation
22
+ _CITATION = """
 
 
 
 
 
23
  """
24
 
25
  # TODO: Add description of the module here
26
  _DESCRIPTION = """\
27
+ A simple metric that calculates cross-entropy loss. Created so that I can log losses from different training tasks within a Hugging Face trainer for multi-task training.
28
  """
29
 
30
 
31
  # TODO: Add description of the arguments of the module here
32
  _KWARGS_DESCRIPTION = """
33
+ prediction_scores: the logits
34
+ references: the labels
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  """
36
 
37
  # TODO: Define external resources urls if needed
 
38
 
39
 
40
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 
50
  citation=_CITATION,
51
  inputs_description=_KWARGS_DESCRIPTION,
52
  # This defines the format of each prediction and reference
53
+ features=datasets.Features(
54
+ {
55
+ "prediction_scores": datasets.Sequence(datasets.Value("float")),
56
+ "references": datasets.Value("int32")
57
+ }
58
+ )
 
 
 
59
  )
60
 
61
+
 
 
 
62
 
63
+ def _compute(self, prediction_scores, references):
64
  """Returns the scores"""
65
+
66
+ loss = F.cross_entropy(input=torch.from_numpy(prediction_scores).flatten(start_dim=0, end_dim=1),
67
+ target=torch.from_numpy(references).flatten(),
68
+ ignore_index=-100).item()
69
  return {
70
+ "cross_entropy_loss": loss
71
  }