# NOTE: this excerpt begins mid-file; the enclosing function below is a
# reconstruction (hypothetical name, following the harness's process_docs
# convention), inferred from the body mapping `_helper` over `dataset`.
# `process_doc` is defined earlier in the same module (truncated here).
def process_docs(dataset):
    def _helper(doc):
        # Repair mojibake: text mis-decoded as Latin-1 is re-encoded and
        # then decoded as the UTF-8 it actually is.
        doc['sentence1'] = process_doc(doc['sentence1']).encode('latin-1').decode('utf-8')
        doc['sentence2'] = process_doc(doc['sentence2']).encode('latin-1').decode('utf-8')
        return doc

    return dataset.map(_helper)
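# Illustrative only: the Latin-1 -> UTF-8 round trip repairs mojibake such
# as 'Ã©' (the UTF-8 bytes of 'é' misread as Latin-1) back into 'é':
# >>> 'Ã©'.encode('latin-1').decode('utf-8')
# 'é'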
def coref_doc_to_text(x):
    def _span_in_context(span_index, span_text):
        # Wrap the mention span in asterisks, in place, within `tokens`.
        span_start = span_index
        span_end = span_start + len(span_text.split(' ')) - 1
        tokens[span_start] = f'*{tokens[span_start]}'
        tokens[span_end] = f'{tokens[span_end]}*'

    tokens = x['text'].split(' ')
    _span_in_context(x['span1_index'], x['span1_text'])
    # span2_index is apparently 1-based in the dataset, hence the -1.
    _span_in_context(x['span2_index'] - 1, x['span2_text'])
    context = process_doc(' '.join(tokens))
    span_1 = process_doc(x['span1_text'])
    span_2 = process_doc(x['span2_text'])
    # Basque prompt, in English: 'Text: {context}\nQuestion: In the preceding
    # text, are "*{span_1}*" and "*{span_2}*" the same thing?\nAnswer:'
    text = (
        f'Testua: {context}\n'
        + f'Galdera: Aurreko testuan, "*{span_1}*" eta "*{span_2}*" gauza bera dira?\n'
        + 'Erantzuna:'
    )
    return text
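# Illustrative usage (hypothetical example document; indices refer to
# whitespace-separated tokens, and `process_doc` comes from this module):
# >>> coref_doc_to_text({'text': 'Mikelek liburua hartu zuen eta berak irakurri zuen',
# ...                    'span1_index': 0, 'span1_text': 'Mikelek',
# ...                    'span2_index': 6, 'span2_text': 'berak'})
# The prompt's context then reads '*Mikelek* liburua ... eta *berak* ...'.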
def micro_f1_score(items):
    # `items` is a list of (gold, prediction) pairs; `load_metric` is
    # imported at the top of this module (truncated in this excerpt).
    f1_metric = load_metric('f1')
    golds, preds = zip(*items)
    f1_score = f1_metric.compute(references=golds, predictions=preds, average='micro')['f1']
    return f1_score

def vaxx_f1_score(items):
    # Macro-average F1 over two of the three stance classes (labels 0 and 2),
    # skipping the neutral class.
    f1_metric = load_metric('f1')
    golds, preds = zip(*items)
    f1_class = f1_metric.compute(references=golds, predictions=preds, labels=[0, 2], average=None)['f1']
    f1_score = sum(f1_class) / len(f1_class)
    return f1_score
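# Illustrative only: for items = [(0, 0), (1, 2), (2, 2), (0, 1)]
# (gold, pred) pairs over labels {0, 1, 2}, micro_f1_score returns 0.5
# (micro-F1 equals accuracy in single-label classification: 2/4 correct),
# while vaxx_f1_score averages per-class F1 over labels 0 and 2 only:
# F1(0) = 2/3 and F1(2) = 2/3, so the score is about 0.667.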
# File: lm-evaluation-harness-main/lm_eval/tasks/bbh/_generate_configs.py
"""""" |
import argparse
import os
import re
import datasets
import requests
import yaml
from tqdm import tqdm
from lm_eval import utils
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_yaml_path', required=True)
    parser.add_argument('--save_prefix_path', default='zeroshot')
    # NOTE: --cot and --fewshot take string values; any non-empty value
    # (e.g. --cot true) is truthy and enables the corresponding mode.
    parser.add_argument('--cot', default=False)
    parser.add_argument('--fewshot', default=False)
    parser.add_argument('--task_prefix', default='')
    return parser.parse_args()
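# Illustrative invocation (the template file and prefix names here are
# assumptions, not verified paths):
#   python _generate_configs.py --base_yaml_path _cot_zeroshot_template_yaml \
#       --save_prefix_path cot_zeroshot --task_prefix cot_zeroshot --cot true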
if __name__ == '__main__':
    args = parse_args()
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding='utf-8') as f:
        base_yaml = yaml.full_load(f)
    base_doc_to_text = 'Q: {{input}}\nA:'
    # Capture everything after 'answer is ' up to, but not including, the
    # final character (usually the trailing period).
    answer_regex = re.compile('(?<=answer is )(.*)(?=.)')
    dataset_path = 'lukaemon/bbh'
    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
        # Fetch the official BBH chain-of-thought prompt for this subset.
        resp = requests.get(
            f'https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt'
        ).content.decode('utf-8')
        prompt = resp.split('\n-----\n')[-1]
        description, *few_shot = prompt.split('\n\n')
        prefix_doc_to_text = ''
        if args.fewshot:
            if args.cot:
                # Keep the full chain-of-thought demonstrations.
                prefix_doc_to_text = '\n\n'.join(few_shot) + '\n\n'
            else:
                # Strip the reasoning: keep each question plus the final answer.
                for shot in few_shot:
                    try:
                        answer = answer_regex.search(shot)[0]
                    except Exception:
                        # Log shots whose answer cannot be parsed; note that
                        # `answer` from the previous iteration is reused below.
                        print('task', task)
                        print(shot)
                    example = shot.split("Let's think step by step.")[0]
                    prefix_doc_to_text += f'{example}{answer}\n\n'
        doc_to_text = prefix_doc_to_text + base_doc_to_text
        if args.cot:
            doc_to_text = doc_to_text + " Let's think step by step.\n"
        yaml_dict = {
            'include': base_yaml_name,
            'task': f'bbh_{args.task_prefix}_{task}',
            'dataset_name': task,
            'description': description + '\n\n',
            'doc_to_text': doc_to_text,
        }
        file_save_path = args.save_prefix_path + f'/{task}.yaml'
        utils.eval_logger.info(f'Saving yaml for subset {task} to {file_save_path}')
        with open(file_save_path, 'w', encoding='utf-8') as yaml_file:
            yaml.dump(yaml_dict, yaml_file, width=float('inf'), allow_unicode=True, default_style='"')
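# Illustrative result (hypothetical zero-shot run with default arguments):
# zeroshot/boolean_expressions.yaml would contain roughly
#   "include": "<base yaml name>"
#   "task": "bbh__boolean_expressions"   # empty --task_prefix leaves '__'
#   "dataset_name": "boolean_expressions"
#   "description": "<subset description fetched from the BBH repo>\n\n"
#   "doc_to_text": "Q: {{input}}\nA:"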
# File: lm-evaluation-harness-main/lm_eval/tasks/bbh/cot_zeroshot/utils.py
import collections
import re
import sys
import unicodedata
from lm_eval.filters.extraction import Filter, RegexFilter

class ExtendedRegexFilter(RegexFilter):
    # Translation table mapping every Unicode punctuation codepoint to None,
    # for use with str.translate when ignore_punctuation is set.
    punct_tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))

    def __init__(self, regex_pattern: str='#### (\\-?[0-9\\.\\,]+)', group_select=0, fallback: str='[invalid]', ignore_case=False, ignore_punctuation=False, regexes_to_ignore=None) -> None:
        # Body reconstructed; the source was truncated at this point. The
        # base class stores the pattern settings, and the extra normalisation
        # flags are kept on the instance for the (omitted) filtering methods.
        super().__init__(regex_pattern, group_select, fallback)
        self.ignore_case = ignore_case
        self.ignore_punctuation = ignore_punctuation
        self.regexes_to_ignore = regexes_to_ignore
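# Illustrative only: the default regex_pattern extracts a GSM8K-style final
# answer written after '#### '. A minimal sketch of the match it performs:
# >>> re.search('#### (\\-?[0-9\\.\\,]+)', 'so the sum is 42\n#### 42').group(1)
# '42'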