                        scores.append(result_score)
                    if any(scores):
                        result_score = 1.0
                    else:
                        result_score = 0.0
                else:
                    try:
                        # HF Evaluate-style metrics take references/predictions kwargs.
                        result_score = self._metric_fn_list[metric](
                            references=[gold],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:
                        # Fall back to metrics that take a single (gold, result) pair.
                        result_score = self._metric_fn_list[metric]([gold, result])
                    if isinstance(result_score, dict):
                        # HF Evaluate metrics return a dict keyed by metric name.
                        result_score = result_score[metric]
                result_dict[metric] = result_score
        else:
            raise ValueError(
                f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
                "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
            )
        return result_dict
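
    # result_dict maps each metric name to this document's score, e.g. a
    # hypothetical {'exact_match': 1.0}; the callables returned from
    # aggregation() below reduce these per-document scores across the task.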
    def aggregation(self) -> dict:
        return self._aggregation_list

    def higher_is_better(self) -> dict:
        return self._higher_is_better

    def get_config(self, key: str) -> Any:
        return getattr(self._config, key, None)

    @property
    def task_name(self) -> Any:
        return getattr(self.config, 'task', None)

    def __repr__(self):
        return (
            f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
            f"output_type={self.OUTPUT_TYPE},"
            f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
            f"num_samples={len(self.eval_docs)})"
        )
class MultipleChoiceTask(Task):
    OUTPUT_TYPE = 'loglikelihood'

    def doc_to_target(self, doc: dict) -> str:
        return ' ' + doc['choices'][doc['gold']]

    def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
        # One loglikelihood request per answer choice, each continuing the same context.
        return [
            Instance(
                request_type='loglikelihood',
                doc=doc,
                arguments=(ctx, ' {}'.format(choice)),
                idx=i,
                **kwargs,
            )
            for i, choice in enumerate(doc['choices'])
        ]

    def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
        results = [res[0] for res in results]  # keep the loglikelihoods, drop the greedy flags
        gold = doc['gold']
        acc = 1.0 if np.argmax(results) == gold else 0.0
        completion_len = np.array([float(len(i)) for i in doc['choices']])
        acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0
        return {'acc': acc, 'acc_norm': acc_norm}

    def higher_is_better(self) -> dict:
        return {'acc': True, 'acc_norm': True}

    def aggregation(self) -> dict:
        return {'acc': mean, 'acc_norm': mean}
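

# PerplexityTask scores a document with a single rolling-loglikelihood request
# over the full target text: no few-shot examples and no context are allowed,
# and each metric is emitted as a (loglikelihood, weight) pair so that the
# corpus-level aggregation can weight by word or byte count.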
class PerplexityTask(Task):
    OUTPUT_TYPE = 'loglikelihood_rolling'

    def has_training_docs(self) -> bool:
        return False

    def fewshot_examples(self, k: int, rnd) -> List:
        if k != 0:
            raise ValueError('The number of fewshot examples must be 0 for perplexity tasks.')
        return []

    def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal['']:
        if num_fewshot != 0:
            raise ValueError('The number of fewshot examples must be 0 for perplexity tasks.')
        return ''

    def higher_is_better(self) -> dict:
        return {'word_perplexity': False, 'byte_perplexity': False, 'bits_per_byte': False}

    def doc_to_decontamination_query(self, doc):
        return doc

    def doc_to_text(self, doc) -> str:
        return ''

    def doc_to_target(self, doc):
        return doc

    def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
        if bool(ctx):
            raise ValueError('Perplexity requests must not have a context.')
        return Instance(
            request_type=self.OUTPUT_TYPE,
            doc=doc,
            arguments=(self.doc_to_target(doc),),
            idx=0,
            **kwargs,
        )

    def process_results(self, doc: dict, results: Tuple[float]) -> dict:
        (loglikelihood,) = results
        words = self.count_words(self.doc_to_target(doc))
        bytes_ = self.count_bytes(self.doc_to_target(doc))
        # Each metric is a (loglikelihood, weight) pair for corpus-level aggregation.
        return {
            'word_perplexity': (loglikelihood, words),
            'byte_perplexity': (loglikelihood, bytes_),
            'bits_per_byte': (loglikelihood, bytes_),
        }

    def aggregation(self) -> dict:
        return {
            'word_perplexity': weighted_perplexity,
            'byte_perplexity': weighted_perplexity,
            'bits_per_byte': bits_per_byte,
        }
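
    # Aggregation sketch, assuming the conventional corpus-level definitions of
    # the helpers referenced above (imported from the metrics module outside
    # this excerpt): given (loglikelihood_i, weight_i) pairs,
    #   weighted_perplexity ~ exp(-sum(ll_i) / sum(w_i))
    #   bits_per_byte       ~ -sum(ll_i) / (sum(w_i) * ln(2))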
    @classmethod
    def count_bytes(cls, doc) -> int:
        # Byte count of the target text; UTF-8 encoding assumed.
        return len(doc.encode('utf-8'))
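
    # `count_words` is called in process_results above but falls outside this
    # excerpt. A minimal sketch, assuming plain whitespace tokenization (tasks
    # with other word boundaries would override it):
    @classmethod
    def count_words(cls, doc) -> int:
        return len(doc.split())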