def filter_age(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'age')


def filter_religion(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'religion')


def filter_disability(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'disability')


def filter_orientation(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'sexual-orientation')


def filter_nationality(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'nationality')


def filter_appearance(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'physical-appearance')


def filter_autre(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, 'autre')
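# The wrappers above delegate to a `filter_dataset` helper defined earlier in
# this module (above the excerpt). A minimal sketch of such a helper, assuming
# each row carries a `bias_type` column naming its category, might look like:
def filter_dataset(dataset: datasets.Dataset, bias_type: str) -> datasets.Dataset:
    # Keep only the rows whose bias category matches the requested one.
    return dataset.filter(lambda example: example['bias_type'].startswith(bias_type))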
# File: lm-evaluation-harness-main/lm_eval/tasks/csatqa/_generate_configs.py
""""""
import argparse
import os
import yaml
from tqdm import tqdm
from lm_eval.logger import eval_logger
SUBSETS = ['WR', 'GR', 'RCS', 'RCSS', 'RCH', 'LI']
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_yaml_path', required=True)
    parser.add_argument('--save_prefix_path', default='csatqa')
    parser.add_argument('--task_prefix', default='')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding='utf-8') as f:
        base_yaml = yaml.full_load(f)
    for name in tqdm(SUBSETS):
        yaml_dict = {
            'include': base_yaml_name,
            'task': f'csatqa_{args.task_prefix}_{name}' if args.task_prefix != '' else f'csatqa_{name.lower()}',
            'dataset_name': name,
        }
        file_save_path = args.save_prefix_path + f'_{name.lower()}.yaml'
        eval_logger.info(f'Saving yaml for subset {name} to {file_save_path}')
        with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
            yaml.dump(yaml_dict, yaml_file, width=float('inf'), allow_unicode=True, default_style='"')
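# Usage sketch (illustrative; the base YAML filename below is an assumption,
# not taken from the repo):
#
#     python _generate_configs.py --base_yaml_path _default_csatqa_template_yaml
#
# With no --task_prefix, the 'WR' subset produces csatqa_wr.yaml. Since
# yaml.dump sorts keys alphabetically and default_style='"' quotes every
# scalar, the output is roughly:
#
#     "dataset_name": "WR"
#     "include": "_default_csatqa_template_yaml"
#     "task": "csatqa_wr"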
# File: lm-evaluation-harness-main/lm_eval/tasks/csatqa/utils.py
import datasets
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        # The Korean instruction reads: "Read the following and choose the
        # correct answer." The trailing phrase reads: "The answer to the
        # given question is".
        instruction = f"๋‹ค์Œ์„ ์ฝ๊ณ  ์ •๋‹ต์œผ๋กœ ์•Œ๋งž์€ ๊ฒƒ์„ ๊ณ ๋ฅด์‹œ์š”.\n### Context: {doc['context']}\n### Question: {doc['question']}\n### Options:\n(1) {doc['option#1']}\n(2) {doc['option#2']}\n(3) {doc['option#3']}\n(4) {doc['option#4']}\n(5) {doc['option#5']}\n### Answer: ์ฃผ์–ด์ง„ ๋ฌธ์ œ์˜ ์ •๋‹ต์€"
        out_doc = {
            'question': instruction,
            'choices': ['(1)', '(2)', '(3)', '(4)', '(5)'],
            # `gold` in the raw data is 1-indexed; shift to a 0-based index.
            'gold': int(doc['gold']) - 1,
        }
        return out_doc

    return dataset.map(_process_doc)
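# Illustrative usage (the row values are made up): build a one-row dataset
# with the expected columns and run it through process_docs.
#
#     raw = datasets.Dataset.from_list([{
#         'context': 'c', 'question': 'q',
#         'option#1': 'a', 'option#2': 'b', 'option#3': 'c',
#         'option#4': 'd', 'option#5': 'e', 'gold': 3,
#     }])
#     processed = process_docs(raw)
#     processed[0]['gold']  # -> 2 (zero-based index of option 3)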
# File: lm-evaluation-harness-main/lm_eval/tasks/drop/utils.py
import re
import string
import numpy as np
from scipy.optimize import linear_sum_assignment
_ARTICLES = re.compile(r'\b(a|an|the)\b', re.UNICODE)


def process_docs(dataset):
    def _process(doc):
        return {
            'id': doc['query_id'],
            'passage': doc['passage'],
            'question': doc['question'],
            'answers': get_answers(doc),
        }

    return dataset.map(_process)
def get_answers(doc):
    def _flatten_validated_answers(validated_answers):
        # Turn the column-oriented validated answers into one dict per answer.
        valid_answers = []
        for i in range(len(validated_answers['number'])):
            valid_answers.append({
                'number': validated_answers['number'][i],
                'date': validated_answers['date'][i],
                'spans': validated_answers['spans'][i],
            })
        return valid_answers

    answers = []
    answers_set = set()
    candidates = [doc['answer']] + _flatten_validated_answers(doc['validated_answers'])
    for candidate in candidates:
        answer = parse_answer(candidate)
        # De-duplicate so each distinct gold answer is counted once.
        if answer in answers_set:
            continue
        answers_set.add(answer)
        answers.append(answer)
    return answers
def parse_answer(answer):
    # A DROP answer takes one of three shapes: a number, a list of spans,
    # or a date; normalize each to a hashable tuple.
    if answer['number'] != '':
        return (str(answer['number']),)
    if answer['spans'] != []:
        return tuple(answer['spans'])
    return (' '.join([answer['date']['day'], answer['date']['month'], answer['date']['year']]).strip(),)
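# Illustrative inputs and the tuples parse_answer would produce (the values
# are made up; empty fields mirror the DROP schema):
#
#     parse_answer({'number': '3', 'spans': [], 'date': {'day': '', 'month': '', 'year': ''}})
#         -> ('3',)
#     parse_answer({'number': '', 'spans': ['a', 'b'], 'date': {'day': '', 'month': '', 'year': ''}})
#         -> ('a', 'b')
#     parse_answer({'number': '', 'spans': [], 'date': {'day': '5', 'month': 'May', 'year': ''}})
#         -> ('5 May',)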
def process_results(doc, results):
    preds, golds = results, doc['answers']