# File: lm-evaluation-harness-main/lm_eval/tasks/eus_trivia/utils.py
from typing import List

letters = ['A', 'B', 'C', 'D']


def doc_to_text(doc) -> str:
    """Format a multiple-choice question as a Basque prompt
    ("Galdera" = question, "Erantzuna" = answer)."""
    candidates = doc['candidates']
    num_choices = len(candidates)
    if num_choices < 2:
        raise ValueError('Invalid number of candidates')
    choices = letters[:num_choices]
    formatted_choices = '\n'.join(
        [f'{choice}: {candidates[i]}' for i, choice in enumerate(choices)]
    )
    return f"Galdera: {doc['question']}\n{formatted_choices}\nErantzuna:"


def doc_to_choice(doc) -> List[str]:
    """Return the letter labels ('A'..'D') matching the number of candidates."""
    num_choices = len(doc['candidates'])
    if num_choices < 2:
        raise ValueError('Invalid number of candidates')
    return letters[:num_choices]
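# --- Usage sketch, not part of the harness: the doc fields and the Basque
# --- question below are hypothetical, inferred from the functions above.
if __name__ == '__main__':
    example_doc = {
        'question': 'Zein da Euskadiko hiriburua?',  # "What is the capital of the Basque Country?"
        'candidates': ['Gasteiz', 'Bilbo', 'Donostia'],
    }
    print(doc_to_text(example_doc))
    # Galdera: Zein da Euskadiko hiriburua?
    # A: Gasteiz
    # B: Bilbo
    # C: Donostia
    # Erantzuna:
    print(doc_to_choice(example_doc))  # ['A', 'B', 'C']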
# File: lm-evaluation-harness-main/lm_eval/tasks/fda/task.py
import re
from typing import List

import numpy as np

from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask


class FDA(ConfigurableTask):
    VERSION = 0
    DATASET_PATH = 'hazyresearch/based-fda'
    DATASET_NAME = 'default'

    def __init__(self, **kwargs):
        super().__init__(config={'metadata': {'version': self.VERSION}})

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        return doc['text']

    def doc_to_target(self, doc):
        return doc['value']

    def construct_requests(self, doc, ctx, **kwargs):
        # One generation request per doc, stopping at a newline
        # or after 48 generated tokens.
        return [
            Instance(
                request_type='generate_until',
                doc=doc,
                arguments=(ctx, {'until': ['\n'], 'max_gen_toks': 48}),
                idx=0,
                **kwargs,
            )
        ]

    def process_results(self, doc, results):
        continuation = results
        return {'contains': contains_score(continuation[0], [doc['value']])}

    def aggregation(self):
        return {'contains': np.mean}

    def higher_is_better(self):
        return {'contains': True}


def contains_score(prediction: str, labels: List[str]):
    # 1 if any gold label appears in the prediction (case-insensitive), else 0.
    return max(
        int(bool(re.search(re.compile(re.escape(label), re.IGNORECASE), prediction)))
        for label in labels
    )
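# --- Usage sketch, not part of the harness: contains_score is a plain
# --- case-insensitive containment check; the example strings are made up.
if __name__ == '__main__':
    print(contains_score('The approval date was 2017-05-12.', ['2017-05-12']))  # 1
    print(contains_score('No date found.', ['2017-05-12']))                     # 0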
# File: lm-evaluation-harness-main/lm_eval/tasks/french_bench/preprocess_wikitext.py
import re


def wikitext_detokenizer(doc):
    string = doc['paragraph']
    # contractions
    string = string.replace("s '", "s'")
    string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
    # number separators
    string = string.replace(' @-@ ', '-')
    string = string.replace(' @,@ ', ',')
    string = string.replace(' @.@ ', '.')
    # punctuation
    string = string.replace(' : ', ': ')
    string = string.replace(' ; ', '; ')
    string = string.replace(' . ', '. ')
    string = string.replace(' ! ', '! ')
    string = string.replace(' ? ', '? ')
    string = string.replace(' , ', ', ')
    # brackets and quotes
    string = re.sub(r'\(\s*([^\)]*?)\s*\)', r'(\1)', string)
    string = re.sub(r'\[\s*([^\]]*?)\s*\]', r'[\1]', string)
    string = re.sub(r'{\s*([^}]*?)\s*}', r'{\1}', string)
    string = re.sub(r'"\s*([^"]*?)\s*"', r'"\1"', string)
    string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
    # miscellaneous
    string = string.replace('= = = =', '====')
    string = string.replace('= = =', '===')
    string = string.replace('= =', '==')
    string = string.replace(' ' + chr(176) + ' ', chr(176))  # degree sign
    string = string.replace(' \n', '\n')
    string = string.replace('\n ', '\n')
    string = string.replace(' N ', ' 1 ')
    string = string.replace(" 's", "'s")
    return string
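# --- Usage sketch, not part of the harness: a hypothetical 'paragraph' value
# --- showing the tokenized-wikitext artifacts the function reverses.
if __name__ == '__main__':
    doc = {'paragraph': '= = Histoire = = \n La tour mesure 324 m ( 1 @,@ 063 ft ) . Elle date de 1889'}
    print(wikitext_detokenizer(doc))
    # == Histoire ==
    # La tour mesure 324 m (1,063 ft). Elle date de 1889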