berkatil commited on
Commit
c8a7177
·
1 Parent(s): 49e5a7f
Files changed (2) hide show
  1. README.md +2 -2
  2. map.py +3 -3
README.md CHANGED
@@ -22,11 +22,11 @@ It is the average of the precision scores computed after each relevant document
22
  >>> my_new_module = evaluate.load("map")
23
  >>> references= [json.dumps({"q_1":{"d_1":1, "d_2":2} }),
24
  json.dumps({"q_2":{"d_2":1, "d_3":2, "d_5":3}})]
25
- >>> predictions = [json.dumps({"q_1": { "d_1": 0.9, "d_2": 0.8}}),
26
  json.dumps({"q_2": {"d_2": 0.9, "d_1": 0.8, "d_5": 0.7, "d_3": 0.3}})]
27
  >>> results = my_new_module.compute(references=references, predictions=predictions)
28
  >>> print(results)
29
- {'map': 0.902777}
30
  ```
31
  ### Inputs
32
  - **predictions:** a list of dictionaries where each dictionary consists of document relevancy scores produced by the model for a given query. One dictionary per query. The dictionaries should be converted to string.
 
22
  >>> my_new_module = evaluate.load("map")
23
  >>> references= [json.dumps({"q_1":{"d_1":1, "d_2":2} }),
24
  json.dumps({"q_2":{"d_2":1, "d_3":2, "d_5":3}})]
25
+ >>> predictions = [json.dumps({"q_1": { "d_1": 0.8, "d_2": 0.9}}),
26
  json.dumps({"q_2": {"d_2": 0.9, "d_1": 0.8, "d_5": 0.7, "d_3": 0.3}})]
27
  >>> results = my_new_module.compute(references=references, predictions=predictions)
28
  >>> print(results)
29
+ {'map': 0.902777}
30
  ```
31
  ### Inputs
32
  - **predictions:** a list of dictionaries where each dictionary consists of document relevancy scores produced by the model for a given query. One dictionary per query. The dictionaries should be converted to string.
map.py CHANGED
@@ -54,11 +54,11 @@ Examples:
54
  >>> my_new_module = evaluate.load("map")
55
  >>> references= [json.dumps({"q_1":{"d_1":1, "d_2":2} }),
56
  json.dumps({"q_2":{"d_2":1, "d_3":2, "d_5":3}})]
57
- >>> predictions = [json.dumps({"q_1": { "d_1": 0.9, "d_2": 0.8}}),
58
  json.dumps({"q_2": {"d_2": 0.9, "d_1": 0.8, "d_5": 0.7, "d_3": 0.3}})]
59
  >>> results = my_new_module.compute(references=references, predictions=predictions)
60
  >>> print(results)
61
- {'map': 0.902777}
62
  """
63
 
64
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
@@ -74,7 +74,7 @@ class map(evaluate.Metric):
74
  features=datasets.Features({
75
  'predictions': datasets.Value("string"),
76
  'references': datasets.Value("string"),
77
- 'k': datasets.Value("int", default=None)
78
  }),
79
  # Homepage of the module for documentation
80
  reference_urls=["https://amenra.github.io/ranx/"]
 
54
  >>> my_new_module = evaluate.load("map")
55
  >>> references= [json.dumps({"q_1":{"d_1":1, "d_2":2} }),
56
  json.dumps({"q_2":{"d_2":1, "d_3":2, "d_5":3}})]
57
+ >>> predictions = [json.dumps({"q_1": { "d_1": 0.8, "d_2": 0.9}}),
58
  json.dumps({"q_2": {"d_2": 0.9, "d_1": 0.8, "d_5": 0.7, "d_3": 0.3}})]
59
  >>> results = my_new_module.compute(references=references, predictions=predictions)
60
  >>> print(results)
61
+ {'map': 0.902777}
62
  """
63
 
64
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 
74
  features=datasets.Features({
75
  'predictions': datasets.Value("string"),
76
  'references': datasets.Value("string"),
77
+ 'k': datasets.Value("int32")
78
  }),
79
  # Homepage of the module for documentation
80
  reference_urls=["https://amenra.github.io/ranx/"]