text
stringlengths 0
15.3k
|
---|
if match: |
match = str(w2n.word_to_num(match)) |
if not match: |
match = self.fallback |
filtered.append(match) |
filtered_resps.append(filtered) |
return filtered_resps |
class WordSortFilter(Filter):
    """Extract the sorted word list from each model response.

    The candidate vocabulary for a document is everything after the
    'List:' marker in ``doc['input']``.  Each response is scanned for
    those words; duplicates are dropped keeping the LAST occurrence of
    each word, so a corrected answer later in the response wins.
    """

    def apply(self, resps, docs):
        out = []
        for responses, doc in zip(resps, docs):
            # Candidate words: the text following the 'List:' marker.
            vocab = doc['input'].split('List:')[1].strip().split()
            pattern = re.compile('|'.join(f'\\b{w}\\b' for w in vocab))
            cleaned = []
            for response in responses:
                hits = pattern.findall(response)
                # Reverse, dedupe (dict insertion keeps the first-seen key,
                # i.e. the last occurrence in the original order), then
                # reverse again to restore the response's word order.
                hits.reverse()
                deduped = reversed(collections.OrderedDict(zip(hits, [None] * len(hits))))
                cleaned.append(' '.join(deduped))
            out.append(cleaned)
        return out
class MultiChoiceRegexFilter(ExtendedRegexFilter):
    """Map each model response to a multiple-choice answer letter.

    Answer choices are recovered from ``doc['input']`` by scanning for
    '(A) choice text' style entries.  Each response is then matched, in
    order of preference, against:

    1. the primary regex inherited from ``ExtendedRegexFilter``;
    2. the literal choice text (translated back to its '(X)' letter);
    3. a bare letter following a colon, e.g. 'Answer: B'.

    Responses with no match fall back to ``self.fallback``.
    """

    # NOTE: the redundant ``__init__(*args, **kwargs)`` that only forwarded
    # to ``super().__init__`` was removed; construction is unchanged.

    def apply(self, resps, docs):
        filtered_resps = []
        for r, doc in zip(resps, docs):
            fallback_regexes = []                # escaped literal choice texts
            choice_to_alpha = {}                 # choice text -> '(A)', '(B)', ...
            next_alpha = 'A'
            without_paren_fallback_regexes = []  # bare letters: A, B, ...
            without_paren_to_target = {}         # 'A' -> '(A)', ...
            # NOTE(review): the '^' inside the character class is treated as a
            # literal excluded character; presumably '[^\n(]' was intended —
            # pattern kept byte-identical, confirm upstream before changing.
            multiple_choices_regex = re.compile('\\([A-Z]\\)([^\\n^(]*)')
            for m in multiple_choices_regex.findall(doc['input']):
                m = self.filter_ignores(m.strip())
                fallback_regexes.append(f'{re.escape(m)}')
                choice_to_alpha[m] = f'({next_alpha})'
                without_paren_fallback_regexes.append(next_alpha)
                without_paren_to_target[next_alpha] = f'({next_alpha})'
                next_alpha = chr(ord(next_alpha) + 1)
            fallback_regex = re.compile('|'.join(fallback_regexes))
            without_paren_fallback_regex = '|'.join(without_paren_fallback_regexes)
            without_paren_fallback_regex = re.compile(f':[\\s]*({without_paren_fallback_regex})')
            filtered = []
            for resp in r:
                # Preference order: primary regex, literal choice text,
                # then a bare letter after a colon; finally the fallback.
                match = self.find_match(self.regex, resp)
                if not match:
                    match = self.find_match(fallback_regex, self.filter_ignores(resp), choice_to_alpha)
                if not match:
                    match = self.find_match(without_paren_fallback_regex, resp, without_paren_to_target)
                if not match:
                    match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)
        return filtered_resps
# File: lm-evaluation-harness-main/lm_eval/tasks/belebele/_generate_configs.py |
"""""" |
import argparse |
import os |
import requests |
import yaml |
from tqdm import tqdm |
from lm_eval.utils import logging |
API_URL = 'https://datasets-server.huggingface.co/splits?dataset=facebook/belebele' |
def parse_args():
    """Parse command-line options for the belebele config generator.

    Returns an ``argparse.Namespace`` with:
        base_yaml_path   (required) path to the base task yaml;
        save_prefix_path prefix for the generated per-language yaml files;
        cot_prompt_path  optional path to a chain-of-thought prompt json;
        task_prefix      optional prefix inserted into generated task names.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--base_yaml_path', required=True)
    cli.add_argument('--save_prefix_path', default='belebele')
    cli.add_argument('--cot_prompt_path', default=None)
    cli.add_argument('--task_prefix', default='')
    return cli.parse_args()
if __name__ == '__main__': |
args = parse_args() |
base_yaml_name = os.path.split(args.base_yaml_path)[-1] |
with open(args.base_yaml_path, encoding='utf-8') as f: |
base_yaml = yaml.full_load(f) |
if args.cot_prompt_path is not None: |
import json |
with open(args.cot_prompt_path, encoding='utf-8') as f: |
cot_file = json.load(f) |
def query(): |
response = requests.get(API_URL) |
return response.json()['splits'] |
print(query()) |
languages = [split['split'] for split in query()] |
for lang in tqdm([lang for lang in languages if 'default' not in lang]): |
yaml_dict = {'include': base_yaml_name, 'task': f'belebele_{args.task_prefix}_{lang}' if args.task_prefix != '' else f'belebele_{lang}', 'test_split': lang, 'fewshot_split': lang} |
file_save_path = args.save_prefix_path + f'_{lang}.yaml' |
logging.info(f'Saving yaml for subset {lang} to {file_save_path}') |
with open(file_save_path, 'w', encoding='utf-8') as yaml_file: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.