# Tail of a BLEU scoring script; computeMaps and bleuFromMaps are defined earlier
# in the original file (not shown in this excerpt).
import sys

if __name__ == '__main__':
    reference_file = sys.argv[1]
    predictions = []
    for row in sys.stdin:
        predictions.append(row)
    (goldMap, predictionMap) = computeMaps(predictions, reference_file)
    print(bleuFromMaps(goldMap, predictionMap)[0])
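
# Hedged usage sketch (the file name below is assumed, not given in this excerpt):
#     python bleu.py <reference_file> < predictions.txt
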
# File: lm-evaluation-harness-main/lm_eval/tasks/copal_id/utils.py
from functools import partial

def convert_choice(choice):
    # Lower-case the first character so the choice continues the premise mid-sentence.
    return choice[0].lower() + choice[1:]

def doc_to_text(doc, connector):
    # Swap the premise's final punctuation for the connector matching the question type.
    conn = connector[doc['question']]
    return doc['premise'].strip()[:-1] + f' {conn}'

def doc_to_choice(doc):
    return [convert_choice(doc['choice1']), convert_choice(doc['choice2'])]

# Indonesian connectors: 'karena' ~ 'because' (cause), 'maka' ~ 'so' (effect).
doc_to_text_id = partial(doc_to_text, connector={'cause': 'karena', 'effect': 'maka'})
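
# Hedged, illustrative sketch (not part of the original file): how the copal_id
# helpers compose a prompt for an invented example doc.
if __name__ == '__main__':
    _example_doc = {
        'premise': 'Tanaman itu layu.',
        'question': 'cause',
        'choice1': 'Ia tidak disiram.',
        'choice2': 'Ia terkena hujan.',
    }
    print(doc_to_text_id(_example_doc))  # -> 'Tanaman itu layu karena'
    print(doc_to_choice(_example_doc))   # -> ['ia tidak disiram.', 'ia terkena hujan.']
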
# File: lm-evaluation-harness-main/lm_eval/tasks/coqa/utils.py
from itertools import zip_longest
import transformers.data.metrics.squad_metrics as squad_metrics

def doc_to_text(doc):
    # Prompt = story, all previous Q/A turns, then the current question with a bare 'A:'.
    doc_text = doc['story'] + '\n\n'
    for q, a in zip_longest(doc['questions']['input_text'], doc['answers']['input_text'][:-1]):
        question = f'Q: {q}\n\n'
        answer = f'A: {a}\n\n' if a is not None else 'A:'
        doc_text += question + answer
    return doc_text

def doc_to_target(doc):
    # Gold answers for the final turn: the main answer plus any additional
    # annotator answers that are not case-insensitive duplicates.
    turn_id = len(doc['questions']['input_text'])
    answers = []
    answer_forturn = doc['answers']['input_text'][turn_id - 1]
    answers.append(answer_forturn)
    additional_answers = doc.get('additional_answers')
    if additional_answers:
        for key in additional_answers:
            additional_answer_for_turn = additional_answers[key]['input_text'][turn_id - 1]
            if additional_answer_for_turn.lower() not in map(str.lower, answers):
                answers.append(additional_answer_for_turn)
    return answers

def em(gold_list, pred):
    # Exact match with leave-one-out averaging when multiple gold answers exist.
    em_sum = 0.0
    if len(gold_list) > 1:
        for i in range(len(gold_list)):
            gold_answers = gold_list[0:i] + gold_list[i + 1:]
            em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
    else:
        em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
    return em_sum / max(1, len(gold_list))

def compute_scores(gold_list, pred):
    # Same leave-one-out scheme as em(), accumulating both exact match and token F1.
    f1_sum = 0.0
    em_sum = 0.0
    if len(gold_list) > 1:
        for i in range(len(gold_list)):
            gold_answers = gold_list[0:i] + gold_list[i + 1:]
            em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
            f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
    else:
        em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
        f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)
    return {'em': em_sum / max(1, len(gold_list)), 'f1': f1_sum / max(1, len(gold_list))}

def process_results(doc, results):
    # Score only the first line of the model's continuation against all gold answers.
    gold_list = doc_to_target(doc)
    pred = results[0].strip().split('\n')[0]
    scores = compute_scores(gold_list, pred)
    return scores
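
# Hedged, self-contained sketch (not part of the original file): the CoQA scoring
# path on an invented doc whose fields mirror those accessed above.
if __name__ == '__main__':
    _demo_doc = {
        'story': 'Anna has a red bike. She rides it to school.',
        'questions': {'input_text': ['What does Anna have?', 'What color is it?']},
        'answers': {'input_text': ['a red bike', 'red']},
        'additional_answers': {'0': {'input_text': ['a bike', 'it is red']}},
    }
    print(doc_to_text(_demo_doc))                      # story, first Q/A turn, trailing 'A:'
    print(process_results(_demo_doc, ['red\nextra']))  # leave-one-out EM/F1 over both golds
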
# File: lm-evaluation-harness-main/lm_eval/tasks/crows_pairs/utils.py
import datasets

def process_results(doc, results):
    # Compare the loglikelihoods of the more- vs. less-stereotypical sentence;
    # pct_stereotype is 1.0 when the model prefers the more-stereotypical one.
    lls, _ = zip(*results)
    likelihood1, likelihood2 = lls
    diff = abs(likelihood1 - likelihood2)
    acc = 1.0 if likelihood1 > likelihood2 else 0.0
    return {'likelihood_diff': diff, 'pct_stereotype': acc}

def doc_to_choice(doc):
    return [doc['sent_more'], doc['sent_less']]

def filter_dataset(dataset: datasets.Dataset, bias_type: str) -> datasets.Dataset:
    # Keep only examples whose bias_type starts with the given prefix.
    return dataset.filter(lambda example: example['bias_type'].startswith(bias_type))

def filter_race_color(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'race-color')

def filter_socio(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'socioeconomic')

def filter_gender(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'gender')