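# Reconstructed context so the fragment below parses; the surrounding imports
# and class header were lost in extraction. `Task`, `mean`, and
# `_drop_duplicates_in_input` are assumed to come from the enclosing
# evaluation-harness module (not shown here).
from abc import abstractmethod

import numpy as np
from datasets import Dataset
from transformers import AutoTokenizer


class _SCROLLSTask(Task):
    # Pruning is off by default; subclasses opt in by overriding these
    # (an assumption consistent with the `is not None` check in download()).
    PRUNE_TOKENIZERS = None
    PRUNE_MAX_TOKENS = None
    PRUNE_NUM_PROC = None
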
    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False
    def training_docs(self):
        # _process_doc returns a list of docs per raw sample, so flatten first,
        # then transpose the list of dicts into the columnar dict that
        # Dataset.from_dict expects.
        processed_docs = list(map(self._process_doc, self.dataset['train']))
        processed_docs = [item for sublist in processed_docs for item in sublist]
        processed_dict = {key: [d[key] for d in processed_docs] for key in processed_docs[0]}
        return Dataset.from_dict(processed_dict)

    def validation_docs(self):
        processed_docs = list(map(self._process_doc, self.dataset['validation']))
        processed_docs = [item for sublist in processed_docs for item in sublist]
        processed_dict = {key: [d[key] for d in processed_docs] for key in processed_docs[0]}
        return Dataset.from_dict(processed_dict)
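
    # Shape of the flatten-and-transpose above, on made-up rows:
    #   [[{'input': 'a', 'outputs': ['x']}], [{'input': 'b', 'outputs': ['y']}]]
    #   -> flattened: [{'input': 'a', ...}, {'input': 'b', ...}]
    #   -> columnar:  {'input': ['a', 'b'], 'outputs': [['x'], ['y']]}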
    def should_decontaminate(self):
        return True

    def doc_to_decontamination_query(self, doc):
        return doc['input']
    def download(self, *args, **kwargs):
        super().download(*args, **kwargs)
        # The SCROLLS test split ships without gold answers, so drop it
        # (has_test_docs above returns False accordingly).
        del self.dataset['test']
        for split in self.dataset:
            self.dataset[split] = _drop_duplicates_in_input(self.dataset[split])
        if self.PRUNE_TOKENIZERS is not None:
            self.prune()
    def _get_prune_text(self, sample):
        return self.doc_to_text(self._process_doc(sample)[0])
    def prune(self):
        """Drop any samples whose prompt exceeds PRUNE_MAX_TOKENS under any of
        the PRUNE_TOKENIZERS, caching verdicts by prompt text."""
        tokenizers = [AutoTokenizer.from_pretrained(tokenizer) for tokenizer in self.PRUNE_TOKENIZERS]
        cache = {}

        def _filter(sample):
            text = self._get_prune_text(sample)
            cached = cache.get(text, None)
            if cached is None:
                for tokenizer in tokenizers:
                    if len(tokenizer(text).input_ids) > self.PRUNE_MAX_TOKENS:
                        cache[text] = False
                        return False
                cache[text] = True
                return True
            else:
                return cached

        self.dataset = self.dataset.filter(_filter, num_proc=self.PRUNE_NUM_PROC)
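
    # How the pruning knobs above are meant to be set, as a hypothetical
    # subclass (the checkpoint name and limits are illustrative, not from
    # the source):
    #
    #   class _ExampleScrollsTask(_SCROLLSTask):
    #       PRUNE_TOKENIZERS = ['gpt2']
    #       PRUNE_MAX_TOKENS = 4096
    #       PRUNE_NUM_PROC = 4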
    def doc_to_target(self, doc):
        # Join all reference answers; the leading space separates the target
        # from the "Answer:" suffix of the prompt.
        return ' ' + ', '.join(doc['outputs'])

    def doc_to_text(self, doc):
        return f"{doc['text']}\n\nQuestion: {doc['question']}\nAnswer:"
    def higher_is_better(self):
        return {x: True for x in self._scrolls_metrics().keys()}

    @abstractmethod
    def _scrolls_metrics(self):
        pass
    def _make_compute_metrics(self, value):
        # Close over `value` so each aggregation callable pulls a single key
        # out of the dict the metric computes.
        def compute_metrics(samples):
            predictions, references = zip(*samples)
            computed = self.metric.compute(predictions=predictions, references=references)
            return computed[value]

        return compute_metrics

    def aggregation(self):
        return {key: self._make_compute_metrics(value) for key, value in self._scrolls_metrics().items()}

class _SCROLLSMultipleChoiceTask(_SCROLLSTask):
    def __post_init__(self):
        self.metric = None

    def _scrolls_metrics(self):
        return None

    def aggregation(self):
        return {'em': mean, 'acc': mean, 'acc_norm': mean}

    def higher_is_better(self):
        return {'em': True, 'acc': True, 'acc_norm': True}
    def process_results(self, doc, results):
        gold = doc['gold']
        lls, _ = zip(*results)
        acc = 1.0 if np.argmax(lls) == gold else 0.0
        # Normalize log-likelihoods by completion length before the argmax.
        completion_len = np.array([float(len(i)) for i in doc['choices']])
        acc_norm = 1.0 if np.argmax(lls / completion_len) == gold else 0.0
        # The fragment is cut off here; the keys must match aggregation()
        # above, so a plausible completion is:
        return {'em': acc_norm * 100.0, 'acc': acc, 'acc_norm': acc_norm}
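
    # Worked example of the length normalization, with made-up numbers:
    #   lls = (-4.0, -6.0), completion_len = (2.0, 6.0)
    #   raw argmax picks choice 0, but lls / completion_len = (-2.0, -1.0),
    #   so the normalized argmax picks choice 1.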