def test_instruction_following_strict(inp, response):
    """Tests whether the response follows every instruction verbatim."""
    instruction_list = inp.instruction_id_list
    is_following_list = []
    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)
        # Drop empty/None values so build_description only receives real arguments.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and 'prompt' in args:
            instruction.build_description(prompt=inp.prompt)
        if response.strip() and instruction.check_following(response):
            is_following_list.append(True)
        else:
            is_following_list.append(False)
    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )
def test_instruction_following_loose(inp, response):
    r = response.split('\n')
    response_remove_first = '\n'.join(r[1:]).strip()
    response_remove_last = '\n'.join(r[:-1]).strip()
    response_remove_both = '\n'.join(r[1:-1]).strip()
    revised_response = response.replace('*', '')
    revised_response_remove_first = response_remove_first.replace('*', '')
    revised_response_remove_last = response_remove_last.replace('*', '')
    revised_response_remove_both = response_remove_both.replace('*', '')
    all_responses = [
        response,
        revised_response,
        response_remove_first,
        response_remove_last,
        response_remove_both,
        revised_response_remove_first,
        revised_response_remove_last,
        revised_response_remove_both,
    ]
    instruction_list = inp.instruction_id_list
    is_following_list = []
    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and 'prompt' in args:
            instruction.build_description(prompt=inp.prompt)
        is_following = False
        for r in all_responses:
            if r.strip() and instruction.check_following(r):
                is_following = True
                break
        is_following_list.append(is_following)
    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )
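# Usage sketch (illustrative, not part of the original file): both checkers
# take an InputExample and the raw model response. The loose variant retries
# each instruction against eight variants of the response -- with the first
# line, last line, or both stripped, and with asterisks removed -- so answers
# wrapped in markdown emphasis or a one-line preamble can still pass.
#
#   out = test_instruction_following_loose(inp, response)
#   out.follow_all_instructions   # True only if every instruction passed
#   out.follow_instruction_list   # per-instruction booleans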
def process_results(doc, results):
    eval_logger.warning(
        'This task is meant for chat-finetuned models, and may not give '
        'meaningful results for models other than `openai` or `anthropic` '
        'if `doc_to_text` in its YAML is not wrapped in the appropriate '
        'chat template string. This warning will be removed when chat '
        'templating support is added natively to local models'
    )
    inp = InputExample(
        key=doc['key'],
        instruction_id_list=doc['instruction_id_list'],
        prompt=doc['prompt'],
        kwargs=doc['kwargs'],
    )
    response = results[0]
    out_strict = test_instruction_following_strict(inp, response)
    out_loose = test_instruction_following_loose(inp, response)
    return {
        'prompt_level_strict_acc': out_strict.follow_all_instructions,
        'inst_level_strict_acc': out_strict.follow_instruction_list,
        'prompt_level_loose_acc': out_loose.follow_all_instructions,
        'inst_level_loose_acc': out_loose.follow_instruction_list,
    }
def agg_inst_level_acc(items):
    flat_items = [item for sublist in items for item in sublist]
    inst_level_acc = sum(flat_items) / len(flat_items)
    return inst_level_acc
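# Illustrative check (not part of the original file): items holds one boolean
# list per prompt; flattening and averaging treats True as 1 and False as 0,
# so two prompts with two instructions each, three followed, score 0.75.
assert agg_inst_level_acc([[True, False], [True, True]]) == 0.75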
# File: lm-evaluation-harness-main/lm_eval/tasks/kobest/utils.py
from datasets import Dataset
from sklearn.metrics import f1_score


def copa_doc_to_text(doc: dict) -> str:
    # '원인' ("cause") appends ' 왜냐하면' ("because");
    # '결과' ("result") appends ' 그래서' ("so").
    connector = {'원인': ' 왜냐하면', '결과': ' 그래서'}[doc['question'].strip()]
    return f"{doc['premise']} {connector}"


def copa_doc_to_target(doc: dict) -> str:
    correct_choice = doc['alternative_1'] if doc['label'] == 0 else doc['alternative_2']
    return f'{correct_choice}'


def copa_doc_to_choice(doc: dict) -> list:
    return [f"{doc['alternative_1']}", f"{doc['alternative_2']}"]
def sentineg_doc_to_text(doc: dict):
    # "문장" = "Sentence"; "긍부정" = "positive/negative" (the sentiment cue).
    return f"문장: {doc['sentence']} 긍부정:"


def wic_doc_to_text(doc: dict) -> str:
    # "Sentence 1: ... Sentence 2: ... Is {word} used with the same meaning
    # in the two sentences?"
    return f"문장1: {doc['context_1']} 문장2: {doc['context_2']} 두 문장에서 {doc['word']}가 같은 뜻으로 쓰였나?"
def hellaswag_process_doc(doc: Dataset) -> Dataset:
    def preprocessor(dataset):
        return {
            'query': f"문장: {dataset['context']}",  # "문장" = "Sentence"
            'choices': [dataset['ending_1'], dataset['ending_2'], dataset['ending_3'], dataset['ending_4']],
            'gold': int(dataset['label']),
        }
    return doc.map(preprocessor)
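# Illustrative check (not part of the original file): map() applies the
# preprocessor row by row, adding the query/choices/gold columns that the
# task's doc_to_* hooks consume.
toy = Dataset.from_dict({
    'context': ['아이가 울었다'],  # "The child cried"
    'ending_1': ['a'], 'ending_2': ['b'], 'ending_3': ['c'], 'ending_4': ['d'],
    'label': [1],
})
processed = hellaswag_process_doc(toy)
# processed[0]['gold'] == 1 and processed[0]['choices'] == ['a', 'b', 'c', 'd']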
def macro_f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average='macro')
    return fscore
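# Illustrative check (not part of the original file): each item is a
# (gold, pred) pair accumulated by the harness, so zip(*items) separates
# labels from predictions. With golds (0, 1, 1) and preds (0, 1, 0), both
# classes get F1 = 2/3, so the macro average is roughly 0.667.
print(macro_f1_score([(0, 0), (1, 1), (1, 0)]))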
# File: lm-evaluation-harness-main/lm_eval/tasks/leaderboard/gpqa/utils.py
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return ' '
    text = text.strip()
    text = text.replace(' [title]', '. ')
    text = re.sub('\\[.*?\\]', '', text)
    text = text.replace('  ', ' ')
    return text
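# Illustrative check (not part of the original file): ' [title]' markers
# become sentence breaks, other bracketed spans are dropped entirely, and
# leftover double spaces are collapsed.
# preprocess('Intro [title] Body [citation] text') -> 'Intro. Body text'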