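# Excerpt from the SCROLLS task definitions (lm-evaluation-harness style).
# It opens partway through a multiple-choice process_results method; the base
# class _SCROLLSTask and the helper _process_doc_prepended_question are
# defined above this excerpt. The imports below are reconstructed from what
# the excerpt actually uses.
import re
from functools import reduce

import transformers.data.metrics.squad_metrics as squad_metrics

from lm_eval.api.instance import Instance
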
        return {'acc': acc, 'acc_norm': acc_norm, 'em': acc_norm * 100.0}

    def construct_requests(self, doc, ctx, **kwargs):
        request_list = [
            Instance(request_type='loglikelihood', doc=doc,
                     arguments=(ctx, ' {}'.format(choice)), idx=i, **kwargs)
            for i, choice in enumerate(doc['choices'])
        ]
        return request_list
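
# Each answer choice is scored with its own loglikelihood request; the part
# of process_results above this excerpt compares those scores against
# doc['gold']. 'em' rescales acc_norm to the 0-100 range, presumably to match
# how the SCROLLS leaderboard reports exact match.
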
class _SCROLLSSummaryTask(_SCROLLSTask):
    def _process_doc(self, doc):
        return [doc]

    def _scrolls_metrics(self):
        return {'rouge1': 'rouge/rouge1', 'rouge2': 'rouge/rouge2', 'rougeL': 'rouge/rougeL'}

    def process_results(self, doc, results):
        return {
            'rouge1': (results[0], doc['outputs']),
            'rouge2': (results[0], doc['outputs']),
            'rougeL': (results[0], doc['outputs']),
        }

    def construct_requests(self, doc, ctx, **kwargs):
        return Instance(request_type='generate_until', doc=doc,
                        arguments=(ctx, {'until': ['\n']}), idx=0, **kwargs)

    def doc_to_text(self, doc):
        return f"{doc['input']}\n\nQuestion: What is a summary of the preceding text?\nAnswer:"
class Qasper(_SCROLLSTask):
    DATASET_NAME = 'qasper'

    def _process_doc(self, doc):
        doc = _process_doc_prepended_question(doc)
        # A question counts as yes/no only if every reference answer
        # normalizes to 'yes' or 'no'.
        doc['is_yes_no'] = reduce(
            lambda prev, cur: prev and squad_metrics.normalize_answer(cur) in ['yes', 'no'],
            doc['outputs'], True,
        )
        return [doc]

    def _scrolls_metrics(self):
        return {'f1': 'f1'}

    def process_results(self, doc, results):
        if doc['is_yes_no']:
            prediction = ' yes' if results[0] > results[1] else ' no'
        elif len(results[0].strip()) == 0:
            prediction = 'Unanswerable'
        else:
            prediction = results[0]
        return {'f1': (prediction, doc['outputs'])}

    def construct_requests(self, doc, ctx, **kwargs):
        if doc['is_yes_no']:
            return [
                Instance(request_type='loglikelihood', doc=doc,
                         arguments=(ctx, ' yes'), idx=0, **kwargs),
                Instance(request_type='loglikelihood', doc=doc,
                         arguments=(ctx, ' no'), idx=1, **kwargs),
            ]
        else:
            return Instance(request_type='generate_until', doc=doc,
                            arguments=(ctx, {'until': ['\n']}), idx=0, **kwargs)
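
# Yes/no questions are scored by comparing the loglikelihoods of ' yes' and
# ' no'; every other question falls back to free-form generation, and an
# empty generation is mapped to 'Unanswerable' in process_results above.
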
class QuALITY(_SCROLLSMultipleChoiceTask):
    DATASET_NAME = 'quality'
    _multiple_choice_pattern = re.compile(r' *\([A-D]\) *')

    @staticmethod
    def _normalize_answer(text):
        return ' '.join(text.split()).strip()

    def _process_doc(self, doc):
        doc = _process_doc_prepended_question(doc)
        # The answer options '(A) ...' through '(D) ...' precede the passage;
        # the first blank line after '(D)' separates them from the text.
        split = doc['text'].find('\n\n', doc['text'].find('(D)'))
        choices_text = doc['text'][:split]
        doc['text'] = doc['text'][split:].strip()
        doc['choices'] = [
            QuALITY._normalize_answer(choice)
            for choice in re.split(QuALITY._multiple_choice_pattern, choices_text)[1:]
        ]
        doc['gold'] = doc['choices'].index(QuALITY._normalize_answer(doc['outputs'][0]))
        return [doc]
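
# A minimal sketch of the choice parsing above (the input string here is
# hypothetical, not a dataset example):
#
#   >>> re.split(QuALITY._multiple_choice_pattern, '(A) red (B) green (C) blue (D) yellow')
#   ['', 'red', 'green', 'blue', 'yellow']
#
# The leading empty string is dropped by the [1:] slice in _process_doc.
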
class NarrativeQA(_SCROLLSTask):
    DATASET_NAME = 'narrative_qa'

    def _process_doc(self, doc):
        return [_process_doc_prepended_question(doc)]

    def _scrolls_metrics(self):
        return {'f1': 'f1'}

    def _get_prune_text(self, doc):
        return self._process_doc(doc)[0]['text']

    def process_results(self, doc, results):
        return {'f1': (results[0], doc['outputs'])}

    def construct_requests(self, doc, ctx, **kwargs):
        return Instance(request_type='generate_until', doc=doc,
                        arguments=(ctx, {'until': ['\n']}), idx=0, **kwargs)
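
# _get_prune_text exposes the question-prepended passage, presumably so a
# pruning hook in _SCROLLSTask (defined outside this excerpt) can drop
# documents whose text would overflow the model's context window.
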
class ContractNLI(_SCROLLSMultipleChoiceTask):
    DATASET_NAME = 'contract_nli'
    CHOICES = ['Not mentioned', 'Entailment', 'Contradiction']

    def _process_doc(self, doc):
        doc = _process_doc_prepended_question(doc)
        doc['choices'] = ContractNLI.CHOICES
        doc['gold'] = ContractNLI.CHOICES.index(doc['outputs'][0])
        return [doc]

    def doc_to_text(self, doc):
        return f"{doc['text']}\n\nHypothesis: {doc['question']}\nConclusion:"
class GovReport(_SCROLLSSummaryTask):
    DATASET_NAME = 'gov_report'
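
# GovReport reuses the summarization prompt and ROUGE scoring from
# _SCROLLSSummaryTask unchanged; only the dataset subset differs. A usage
# sketch under that assumption (the harness normally drives tasks through
# its own evaluation loop, not by direct instantiation):
#
#   task = GovReport()                   # hypothetical direct construction
#   docs = task._process_doc(row)        # row: one raw dataset example
#   prompt = task.doc_to_text(docs[0])   # ends with '...\nAnswer:'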