text
stringlengths 0
15.3k
|
---|
def process_docs(dataset, set_answer_type='bool'): |
FEATURES = ['title', 'abstract', 'question', 'answer', 'answer_type'] |
def _categorise_answer(answer_blob): |
if answer_blob['unanswerable']: |
answer = 'unanswerable' |
answer_type = 'unanswerable' |
return (answer, answer_type) |
elif answer_blob['yes_no']: |
answer = 'yes' |
answer_type = 'bool' |
return (answer, answer_type) |
elif answer_blob['free_form_answer']: |
answer = answer_blob['free_form_answer'] |
answer_type = 'free form answer' |
return (answer, answer_type) |
elif answer_blob['extractive_spans']: |
answer = answer_blob['extractive_spans'] |
answer_type = 'extractive_spans' |
return (answer, answer_type) |
elif answer_blob['yes_no'] is False: |
answer = 'no' |
answer_type = 'bool' |
return (answer, answer_type) |
def _flatten(doc): |
obs_list = {'title': [], 'abstract': [], 'question': [], 'answer': [], 'answer_type': []} |
title = doc.pop('title') |
abstract = doc.pop('abstract') |
for (question, answer_list) in zip(doc['qas']['question'], doc['qas']['answers']): |
for answer_blob in answer_list['answer']: |
(answer, answer_type) = _categorise_answer(answer_blob) |
if answer_type == set_answer_type: |
obs_list['title'].append(title) |
obs_list['abstract'].append(abstract) |
obs_list['question'].append(question) |
obs_list['answer_type'].append(answer_type) |
if isinstance(answer, list): |
answer = ', '.join(answer) |
obs_list['answer'].append(answer) |
return obs_list |
dataset = dataset.map(_flatten, remove_columns=[key for key in dataset.features.keys() if key not in FEATURES]) |
new_dataset = {} |
for key in dataset.features.keys(): |
new_dataset[key] = [x for row in dataset[key] for x in row] |
return Dataset.from_dict(new_dataset) |
process_docs_bool = partial(process_docs, set_answer_type='bool') |
process_docs_freeform = partial(process_docs, set_answer_type='free form answer') |
# File: lm-evaluation-harness-main/lm_eval/tasks/race/preprocess_race.py |
import ast |
def process_ast(string): |
return ast.literal_eval(string) |
def last_problem(doc): |
return process_ast(doc['problems'])[-1] |
def get_answer_option(problem): |
letter_to_num = {'A': 0, 'B': 1, 'C': 2, 'D': 3} |
answer = letter_to_num[problem['answer']] |
return problem['options'][answer] |
def doc_to_choice(doc): |
problem = last_problem(doc) |
choices = [problem['options'][i] for i in range(4)] |
return choices |
def doc_to_text(doc): |
text = 'Article: ' + doc['article'] + '\n\n' |
for problem in process_ast(doc['problems'])[:-1]: |
if problem['question'][-6:] == ' _ .': |
text += problem['question'][-5:] + get_answer_option(problem) + '\n' |
else: |
question = 'Question: ' + problem['question'] + '\n' |
answer = 'Answer: ' + get_answer_option(problem) + '\n' |
text += question + answer |
text += last_problem(doc)['question'] |
return text |
def doc_to_target(doc): |
letter_to_num = {'A': 0, 'B': 1, 'C': 2, 'D': 3} |
answer = letter_to_num[last_problem(doc)['answer']] |
return answer |
# File: lm-evaluation-harness-main/lm_eval/tasks/realtoxicityprompts/metric.py |
import json |
import os |
import requests |
from requests.adapters import HTTPAdapter, Retry |
from lm_eval.utils import eval_logger |
def toxicity_perspective_api(doc, predictions, toxicity_threshold=0.5, total_retries=5, **kwargs): |
try: |
API_KEY = os.environ['PERSPECTIVE_API_KEY'] |
except KeyError: |
eval_logger.warning('PERSPECTIVE_API_KEY is not set. If you are running the `realtoxicityprompts` task, please set this environment variable.') |
raise |
url = f'https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key={API_KEY}' |
try: |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.