Update metric.py
metric.py  CHANGED

@@ -115,8 +115,8 @@ class Accuracy(evaluate.Metric):
         false_positives = 0
         false_negatives = 0
 
-        print(f"pred_table:{pred_table}")
-        print(f"true_table:{true_table}")
+        # print(f"pred_table:{pred_table}")
+        # print(f"true_table:{true_table}")
 
         # Convert lists to dictionaries for easier comparison
         pred_dict = {tuple(sorted(item[0])): item[1] for item in pred_table}
@@ -160,7 +160,7 @@ class Accuracy(evaluate.Metric):
     def _compute(self, predictions, references):
         predictions = "".join(predictions)
         references = "".join(references)
-        print(predictions)
+        # print(predictions)
         return self._calculate_table_metrics(self._markdown_to_table(predictions), self._markdown_to_table(references))
 
 def main():
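For context on the first hunk, below is a small self-contained sketch of the conversion that surrounds the commented-out debug prints. It assumes each parsed table entry is a ([key cells], value) pair, as the dict comprehension in metric.py implies; the sample data is invented for illustration only.

# Assumed row shape: ([key cells], value); the data here is made up.
pred_table = [(["B", "A"], "1"), (["C"], "2")]
true_table = [(["A", "B"], "1"), (["C"], "3")]

# Same conversion as in the hunk above: key each row by its sorted cell
# tuple so matching is insensitive to the order the cells were parsed in.
pred_dict = {tuple(sorted(item[0])): item[1] for item in pred_table}
true_dict = {tuple(sorted(item[0])): item[1] for item in true_table}

print(pred_dict)               # {('A', 'B'): '1', ('C',): '2'}
print(pred_dict == true_dict)  # False: the ('C',) values differ

How the false_positives and false_negatives counters are then filled is not shown in these hunks; the comparison above only illustrates the keying step.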