    tokens = [token for token in tokens if token.strip()]
    normalized = ' '.join(tokens).strip()
    return normalized
# File: lm-evaluation-harness-main/lm_eval/tasks/eq_bench/utils.py
import math
import re
def calculate_score_fullscale(docs, results):
    # The reference answer is stored as the string repr of a dict mapping
    # emotion1..emotion4 and emotion{i}_score keys to names and scores.
    reference = eval(docs['reference_answer_fullscale'])
    # Parse "Emotion: score" pairs out of the model completion.
    user = dict(re.findall(r'(\w+):\s+(\d+)', results[0]))
    if len(user.items()) != 4:
        return {'eqbench': 0, 'percent_parseable': 0}
    # Check that the four emotions in the answer match those in the reference.
    emotions_dict = {}
    for emotion, user_emotion_score in user.items():
        for i in range(1, 5):
            if emotion == reference[f'emotion{i}']:
                emotions_dict[emotion] = True
    if len(emotions_dict) != 4:
        print('! Error: emotions did not match reference')
        print(user)
        return {'eqbench': 0, 'percent_parseable': 0}
    # Tally of differences from the reference scores for this question.
    difference_tally = 0
    for emotion, user_emotion_score in user.items():
        for i in range(1, 5):
            if emotion == reference[f'emotion{i}']:
                d = abs(float(user_emotion_score) - float(reference[f'emotion{i}_score']))
                if d == 0:
                    scaled_difference = 0
                elif d <= 5:
                    # S-shaped scaling: small differences are penalised lightly,
                    # differences approaching 5 are penalised almost in full.
                    scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))
                else:
                    scaled_difference = d
                difference_tally += scaled_difference
    # Invert the tally so that answers closer to the reference score higher; the
    # adjustment constant calibrates the scale of the final 0-100 score.
    adjust_const = 0.7477
    final_score = 10 - difference_tally * adjust_const
    final_score_percent = final_score * 10
    return {'eqbench': final_score_percent, 'percent_parseable': 100}
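# Illustrative sketch (not part of the original file): a hypothetical reference answer
# and model completion showing how calculate_score_fullscale parses and scores them.
# Emotion names and scores below are invented for demonstration only.
if __name__ == '__main__':
    example_doc = {
        'reference_answer_fullscale': (
            "{'emotion1': 'Surprise', 'emotion1_score': 7, "
            "'emotion2': 'Relief', 'emotion2_score': 2, "
            "'emotion3': 'Anger', 'emotion3_score': 0, "
            "'emotion4': 'Joy', 'emotion4_score': 5}"
        ),
    }
    example_results = ['Surprise: 6\nRelief: 3\nAnger: 0\nJoy: 5']
    # Prints a dict with 'eqbench' (0-100) and 'percent_parseable' keys.
    print(calculate_score_fullscale(example_doc, example_results))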
# File: lm-evaluation-harness-main/lm_eval/tasks/eus_exams/configs.py
import argparse
import json
import requests
import yaml
# Fetch the list of EusExams dataset configs from the Hugging Face datasets server.
response = requests.get('https://datasets-server.huggingface.co/splits?dataset=HiTZ%2FEusExams', timeout=5)
response_json = json.loads(response.text)
CONFIGS = [split['config'] for split in response_json['splits']]
def gen_config_yamls(output_dir: str, overwrite: bool) -> None:
    """Generate one YAML task config per EusExams dataset config."""
    err = []
    for config in CONFIGS:
        file_name = f'eus_exams_{config}.yaml'
        try:
            # 'x' mode fails if the file already exists, unless --overwrite was passed.
            with open(f'{output_dir}/{file_name}', 'w' if overwrite else 'x') as f:
                f.write('# Generated by utils.py\n')
                yaml.dump(
                    {
                        'include': 'eus_exams_es' if 'eus_exams_es' in config else 'eus_exams_eu',
                        'dataset_name': config,
                        'task': f'eus_exams_{config}',
                    },
                    f,
                )
        except FileExistsError:
            err.append(file_name)
    if len(err) > 0:
        raise FileExistsError(f"Files were not created because they already exist (use --overwrite flag): {', '.join(err)}")
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--overwrite', default=False, action='store_true', help='Overwrite files if they already exist')
    parser.add_argument('--output-dir', default='.', help='Directory to write yaml files to')
    args = parser.parse_args()
    gen_config_yamls(output_dir=args.output_dir, overwrite=args.overwrite)

if __name__ == '__main__':
    main()
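# Example invocation (illustrative; assumes this script is saved as configs.py, per the
# header above, and run from the eus_exams task directory):
#   python configs.py --output-dir . --overwrite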
# File: lm-evaluation-harness-main/lm_eval/tasks/eus_exams/utils.py
import datasets
def process_docs(dataset: datasets.Dataset):
    """Drop examples with an out-of-range answer index or fully empty candidates."""

    def valid_example(example: dict) -> bool:
        # The answer must index one of the four candidate options.
        if example['answer'] not in [0, 1, 2, 3]:
            return False
        # Discard questions whose candidate list is entirely empty.
        if example['candidates'] == ['', '', '', '']:
            return False
        return True

    return dataset.filter(valid_example)
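# Minimal sketch (not part of the original file): filtering a tiny in-memory dataset
# with process_docs. Field names follow the schema assumed above; values are invented.
if __name__ == '__main__':
    toy = datasets.Dataset.from_list([
        {'question': 'q1', 'candidates': ['a', 'b', 'c', 'd'], 'answer': 1},
        {'question': 'q2', 'candidates': ['', '', '', ''], 'answer': 0},      # dropped: empty candidates
        {'question': 'q3', 'candidates': ['a', 'b', 'c', 'd'], 'answer': 7},  # dropped: answer out of range
    ])
    print(len(process_docs(toy)))  # -> 1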
# File: lm-evaluation-harness-main/lm_eval/tasks/eus_reading/utils.py
from typing import List
letters = ['A', 'B', 'C', 'D']
def doc_to_text_context(doc) -> str:
    """Format a Basque reading-comprehension prompt: passage, question, lettered options."""
    candidates = doc['candidates']
    num_choices = len(candidates)
    if num_choices < 2:
        raise ValueError('Invalid number of candidates')
    choices = letters[:num_choices]
    formatted_choices = '\n'.join([f'{choice}: {candidates[i]}' for i, choice in enumerate(choices)])
    # Pasartea = passage, Galdera = question, Erantzuna = answer.
    return f"Pasartea: {doc['context']}\n\nGaldera: {doc['question']}\n{formatted_choices}\nErantzuna:"
def doc_to_choice(doc) -> List[str]:
    num_choices = len(doc['candidates'])
    if num_choices < 2:
        raise ValueError('Invalid number of candidates')