class SummScreenFD(_SCROLLSSummaryTask):
    DATASET_NAME = 'summ_screen_fd'


class QMSum(_SCROLLSSummaryTask):
    DATASET_NAME = 'qmsum'

    def _process_doc(self, doc):
        return [_process_doc_prepended_question(doc)]

    def doc_to_text(self, doc):
        return f"{doc['text']}\n\nQuestion: {doc['question']}\nAnswer:"
# File: lm-evaluation-harness-main/lm_eval/tasks/squad_completion/task.py
import re
from typing import List
import numpy as np
from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask
class SQUADCompletion(ConfigurableTask):
    VERSION = 0
    DATASET_PATH = 'hazyresearch/based-squad'
    DATASET_NAME = 'default'

    def __init__(self, **kwargs):
        super().__init__(config={'metadata': {'version': self.VERSION}})

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        return doc['text']

    def doc_to_target(self, doc):
        return doc['value']

    def construct_requests(self, doc, ctx, **kwargs):
        # One generation request per doc: stop at the first newline or after
        # 48 tokens, since the gold answers are short spans.
        return [
            Instance(
                request_type='generate_until',
                doc=doc,
                arguments=(ctx, {'until': ['\n'], 'max_gen_toks': 48}),
                idx=0,
                **kwargs,
            )
        ]
    def process_results(self, doc, results):
        # `results` holds one generation per request; score the single one.
        continuation = results[0]
        return {'contains': contains_score(continuation, [doc['value']])}
    def aggregation(self):
        return {'contains': np.mean}

    def higher_is_better(self):
        return {'contains': True}
def contains_score(prediction: str, labels: List[str]):
    return max(
        int(bool(re.search(re.compile(re.escape(label), re.IGNORECASE), prediction)))
        for label in labels
    )
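# Illustrative behavior (not part of the original file): the score is a
# case-insensitive substring match, so surrounding text still counts as a hit.
#   contains_score('The answer is Paris.', ['paris'])  -> 1
#   contains_score('I do not know.', ['Paris'])        -> 0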
# File: lm-evaluation-harness-main/lm_eval/tasks/squadv2/task.py
""""""
from functools import partial
from math import exp
import datasets
from packaging import version
from lm_eval.api.instance import Instance
from lm_eval.api.task import ConfigurableTask
_CITATION = "\n@misc{rajpurkar2018know,\n title={Know What You Don't Know: Unanswerable Questions for SQuAD},\n author={Pranav Rajpurkar and Robin Jia and Percy Liang},\n year={2018},\n eprint={1806.03822},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
def _squad_metric(predictions, references):
    squad_metric = datasets.load_metric('squad_v2')
    return squad_metric.compute(predictions=predictions, references=references)


def _squad_agg(key, items):
    predictions, references = zip(*items)
    return _squad_metric(predictions=predictions, references=references).get(key, 0)
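# Illustrative item shape (assumed from the HF `squad_v2` metric contract):
# each scored item is a (prediction, reference) pair along the lines of
#   ({'id': '5ad3...', 'prediction_text': 'Normandy', 'no_answer_probability': 0.0},
#    {'id': '5ad3...', 'answers': {'text': ['Normandy'], 'answer_start': [159]}})
# so that, e.g., partial(_squad_agg, 'exact') can serve as an aggregation fn
# for the 'exact' key returned by the metric.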
class SQuAD2(ConfigurableTask):
    VERSION = 3
    DATASET_PATH = 'squad_v2'
    DATASET_NAME = None

    def __init__(self, config=None):
        super().__init__(config={'metadata': {'version': self.VERSION}})
        assert version.parse(datasets.__version__) >= version.parse('1.11.0'), 'datasets v1.11.0 or later required for SQuAD'

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        return self.dataset['train']