        print(ss1, ss2)
        return ss1 == ss2
    except Exception:
        return str1 == str2
def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
    candidate = results[0]
    gold = doc['answer']
    if not gold:
        print(doc, candidate, gold)
    if is_equiv(candidate, gold):
        retval = 1
    else:
        retval = 0
    results = {'acc': retval}
    return results
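# Illustrative note (added comment, not in the upstream file): for
# doc = {'answer': '42'} and results = ['42'], process_results returns
# {'acc': 1}. is_equiv above compares the processed strings ss1/ss2 and
# falls back to raw string equality if an exception is raised.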
def process_results_mcqa(doc, results):
    results = [result[0] for result in results]
    gold = doc['gold']
    acc = 1.0 if int(np.argmax(results)) in gold else 0.0
    completion_len = np.array([float(len(i)) for i in doc['choices']])
    acc_norm = 1.0 if int(np.argmax(results / completion_len)) in gold else 0.0
    return {'acc': acc, 'acc_norm': acc_norm}
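# Illustrative note (added comment): `results` is expected to be the per-choice
# (loglikelihood, is_greedy) pairs produced by the harness, so result[0] extracts
# the loglikelihood. E.g. with doc = {'gold': [1], 'choices': ['yes', 'no way']}
# and loglikelihoods [-4.2, -1.3], np.argmax picks index 1, so acc = 1.0;
# acc_norm instead takes the argmax of each loglikelihood divided by the
# character length of its choice.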
# File: lm-evaluation-harness-main/lm_eval/tasks/arabicmmlu/_generate_configs.py
""""""
import argparse
import logging
import os
import yaml
from tqdm import tqdm
eval_logger = logging.getLogger('lm-eval')
SUBJECTS = {'Driving Test': 'other', 'High Geography': 'social_science', 'High History': 'humanities', 'Islamic Studies': 'humanities', 'Univ Accounting': 'social_science', 'Primary General Knowledge': 'other', 'Univ Political Science': 'social_science', 'Primary Math': 'stem', 'Middle General Knowledge': 'other', 'High Biology': 'stem', 'Primary Natural Science': 'stem', 'High Economics': 'social_science', 'Middle Natural Science': 'stem', 'Middle Geography': 'social_science', 'Primary Social Science': 'social_science', 'Middle Computer Science': 'stem', 'Middle Islamic Studies': 'humanities', 'Primary Computer Science': 'stem', 'High Physics': 'stem', 'Middle Social Science': 'social_science', 'Middle Civics': 'social_science', 'High Computer Science': 'stem', 'General Knowledge': 'other', 'High Civics': 'social_science', 'Prof Law': 'humanities', 'High Islamic Studies': 'humanities', 'Primary Arabic Language': 'language', 'High Arabic Language': 'language', 'Arabic Language (Grammar)': 'language', 'Primary History': 'humanities', 'Middle History': 'humanities', 'Univ Economics': 'social_science', 'Arabic Language (General)': 'language', 'Univ Computer Science': 'stem', 'Primary Islamic Studies': 'humanities', 'Primary Geography': 'social_science', 'High Philosophy': 'humanities', 'Middle Arabic Language': 'language', 'Middle Economics': 'social_science', 'Univ Management': 'other'}
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_yaml_path', default='_default_arabicmmlu_template_yaml')
    parser.add_argument('--save_prefix_path', default='arabicmmlu')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding='utf-8') as f:
        base_yaml = yaml.full_load(f)

    ALL_CATEGORIES = []
    for (subject, category) in tqdm(SUBJECTS.items()):
        if category not in ALL_CATEGORIES:
            ALL_CATEGORIES.append(category)

        yaml_dict = {
            'include': base_yaml_name,
            'tag': f'arabicmmlu_{category}',
            'task': f"arabicmmlu_{subject.lower().replace(' ', '_')}",
            'task_alias': subject,
            'dataset_name': subject,
        }

        file_save_path = args.save_prefix_path + f"_{subject.lower().replace(' ', '_').replace('(', '').replace(')', '')}.yaml"
        eval_logger.info(f'Saving yaml for subset {subject} to {file_save_path}')
        with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
            yaml.dump(yaml_dict, yaml_file, allow_unicode=True, default_style='"')

    arabicmmlu_subcategories = [f'arabicmmlu_{category}' for category in ALL_CATEGORIES]
    file_save_path = args.save_prefix_path + '.yaml'
    eval_logger.info(f'Saving benchmark config to {file_save_path}')
    with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
        yaml.dump({'group': 'arabicmmlu', 'task': arabicmmlu_subcategories}, yaml_file, indent=4, default_flow_style=False)
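# Illustrative output (assuming the default arguments above): the loop writes one
# config per subject, e.g. arabicmmlu_high_biology.yaml containing roughly
#     "dataset_name": "High Biology"
#     "include": "_default_arabicmmlu_template_yaml"
#     "tag": "arabicmmlu_stem"
#     "task": "arabicmmlu_high_biology"
#     "task_alias": "High Biology"
# and the final dump writes arabicmmlu.yaml, which groups one arabicmmlu_<category>
# entry per unique category under the 'arabicmmlu' group.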
# File: lm-evaluation-harness-main/lm_eval/tasks/arabicmmlu/utils.py
PROMPT = 'This is a {}. Select the correct answer!\n\nQuestion: {}\n{}\n\nAnswer:'
level_en = {'Primary': 'primary school', 'Middle': 'middle school', 'High': 'high school', 'Univ': 'university', 'Prof': 'professional'}
alpa = ['A.', 'B.', 'C.', 'D.', 'E.']
def doc_to_text(doc):
    level = '' if not doc['Level'] else ' for ' + level_en[doc['Level']]
    country = '' if not doc['Country'] else ' in ' + doc['Country']
    main_meta_data = f"{doc['Subject']} question{level}{country}"
    question = doc['Question'] if doc['Context'] == '' else f"{doc['Context']}\n\n{doc['Question']}"
    options = []
    for (i, opt) in enumerate(['Option 1', 'Option 2', 'Option 3', 'Option 4', 'Option 5']):
        if not doc[opt]:
            break
        options.append(f'{alpa[i]} {doc[opt]}')
    doc_text = PROMPT.format(main_meta_data, question, '\n'.join(options))
    return doc_text
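# Illustrative rendering (hypothetical doc, not taken from the dataset): for a doc
# with Subject='Biology', Level='High', Country='Jordan', empty Context, and two
# non-empty options, doc_to_text produces:
#     This is a Biology question for high school in Jordan. Select the correct answer!
#
#     Question: <question text>
#     A. <option 1>
#     B. <option 2>
#
#     Answer: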
def doc_to_choice(doc):
    return [alpa[i][0] for i in range(5) if doc[f'Option {i + 1}']]
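# Added comment: doc_to_choice mirrors the option loop in doc_to_text, returning
# only the letter labels of non-empty options, e.g. ['A', 'B', 'C', 'D'] for a
# four-option question.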
# File: lm-evaluation-harness-main/lm_eval/tasks/basqueglue/utils.py
import html
import re
from datasets import load_metric
def general_detokenize(string):
    string = re.sub('\\s+([.,;:!?)])', '\\1', string)
    string = re.sub('(\\s+|^)\\(\\s+([^)]+)\\s+\\)', '\\1(\\2)', string)
    string = re.sub('(\\s+|^)\\[\\s+([^)]+)\\s+\\]', '\\1[\\2]', string)
    string = re.sub('(\\s+|^)"\\s+([^"]+)\\s+"', '\\1"\\2"', string)
    string = re.sub("(\\s+|^)'\\s+([^']+)\\s+'", "\\1'\\2'", string)
    return string

def process_doc(string):
    string = html.unescape(string)
    string = general_detokenize(string)
    return string
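# Illustrative examples (added comment): process_doc first unescapes HTML entities,
# then reattaches detokenized punctuation and quotes, e.g.
#     process_doc('Kaixo , mundua !')      -> 'Kaixo, mundua!'
#     process_doc('&quot; kaixo &quot;')   -> '"kaixo"'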
def process_wic_docs(dataset):