if bool(results):
    results, versions, show_group_table, *_ = consolidate_group_results(results, versions, task_dict)
    results_agg, group_agg = prepare_print_tasks(task_dict, results)
    subtask_list = get_subtask_list(task_dict)

    # Collect all higher_is_better values for metrics in each group's subtasks.
    _higher_is_better = {}
    for group, task_list in subtask_list.items():
        if len(task_list) != 0:  # subtask_list maps "task_name" -> [] for solo tasks
            for task in task_list:
                for m, h in higher_is_better[task].items():
                    if m not in _higher_is_better.keys():
                        _higher_is_better[m] = h
                    if m in _higher_is_better and _higher_is_better[m] is not None and _higher_is_better[m] != h:
                        eval_logger.warning(
                            f'Higher_is_better values for metric {m} in group {group} are not consistent. Defaulting to None.'
                        )
                        _higher_is_better[m] = None
            higher_is_better[group] = _higher_is_better

    results_dict = {
        'results': dict(results_agg.items()),
        **({'groups': dict(group_agg.items())} if bool(group_agg) & show_group_table else {}),
        'group_subtasks': dict(reversed(subtask_list.items())),
        'configs': dict(sorted(configs.items())),
        'versions': dict(sorted(versions.items())),
        'n-shot': dict(sorted(num_fewshot.items())),
        'higher_is_better': dict(sorted(higher_is_better.items())),
        'n-samples': {
            task_output.task_name: {
                'original': len(task_output.task.eval_docs),
                'effective': min(limit if limit else len(task_output.task.eval_docs), len(task_output.task.eval_docs)),
            }
            for task_output in eval_tasks
        },
    }
    if log_samples:
        results_dict['samples'] = dict(samples)
    return results_dict
else:
    return None
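
# Illustrative note (not part of the original file): the 'effective' count in
# 'n-samples' above is simply the requested limit capped at the number of eval
# docs. For a task with 50 eval docs:
#   limit=None -> {'original': 50, 'effective': 50}
#   limit=10   -> {'original': 50, 'effective': 10}
#   limit=100  -> {'original': 50, 'effective': 50}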
def request_caching_arg_to_dict(cache_requests: str) -> dict:
    return {
        'cache_requests': cache_requests in {'true', 'refresh'},
        'rewrite_requests_cache': cache_requests == 'refresh',
        'delete_requests_cache': cache_requests == 'delete',
    }
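
# Illustrative usage (not part of the original file): the three recognized
# cache_requests values map onto the boolean flags as follows.
if __name__ == '__main__':
    assert request_caching_arg_to_dict('true') == {
        'cache_requests': True, 'rewrite_requests_cache': False, 'delete_requests_cache': False
    }
    assert request_caching_arg_to_dict('refresh') == {
        'cache_requests': True, 'rewrite_requests_cache': True, 'delete_requests_cache': False
    }
    assert request_caching_arg_to_dict('delete') == {
        'cache_requests': False, 'rewrite_requests_cache': False, 'delete_requests_cache': True
    }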
# File: lm-evaluation-harness-main/lm_eval/filters/__init__.py
from functools import partial
from typing import List
from lm_eval.api.filter import FilterEnsemble
from lm_eval.api.registry import get_filter
from . import extraction, selection, transformation
def build_filter_ensemble(filter_name: str, components: List[List[str]]) -> FilterEnsemble:
    """Create a filtering pipeline; each component is a [registered filter name, kwargs] pair."""
    filters = []
    for function, kwargs in components:
        if kwargs is None:
            kwargs = {}
        # Look the filter up by its registry name and add it as a pipeline step.
        f = partial(get_filter(function), **kwargs)
        filters.append(f)
    return FilterEnsemble(name=filter_name, filters=filters)
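
# Illustrative usage (not part of the original file; assumes the 'regex' filter
# from extraction.py and the 'take_first' filter from selection.py are
# registered): build a two-stage pipeline that extracts a '#### <answer>' span
# from each response, then keeps the first candidate per document.
#
#   ensemble = build_filter_ensemble(
#       'get-answer',
#       [['regex', {'regex_pattern': r'#### (\-?[0-9\.\,]+)'}], ['take_first', None]],
#   )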
# File: lm-evaluation-harness-main/lm_eval/filters/decontamination.py
from lm_eval.api.filter import Filter
from lm_eval.api.registry import register_filter
@register_filter('decontaminate')
class DecontaminationFilter(Filter):
    """Placeholder for tracking train/test contamination; not yet implemented."""
    name = 'track_decontamination'

    def __init__(self, path) -> None:
        self._decontam_results = None

    def apply(self, resps, docs) -> None:
        # Not implemented: currently a no-op stub.
        pass
# File: lm-evaluation-harness-main/lm_eval/filters/extraction.py
import re
import sys
import unicodedata
from lm_eval.api.filter import Filter
from lm_eval.api.registry import register_filter
@register_filter('regex')
class RegexFilter(Filter):
    """Extracts an answer span from each response using a regex pattern, falling
    back to a default string when no match is found."""

    def __init__(self, regex_pattern: str = r'#### (\-?[0-9\.\,]+)', group_select=0, fallback: str = '[invalid]') -> None:
        self.regex_pattern = regex_pattern
        self.regex = re.compile(regex_pattern)
        self.group_select = group_select
        self.fallback = fallback

    def apply(self, resps, docs):
        # resps is a list of lists: one list of model responses per document.
        # Each response set is filtered independently and kept as a list.
        def filter_set(inst):
            filtered = []
            for resp in inst:
                match = self.regex.findall(resp)
                if match:
                    # group_select indexes into the list of matches found.
                    match = match[self.group_select]
                    if isinstance(match, tuple):
                        # With multiple capture groups, keep the first non-empty one.
                        match = [m for m in match if m][0]
                    match = match.strip()
                else:
                    match = self.fallback
                filtered.append(match)
            return filtered

        return list(map(filter_set, resps))
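
# Illustrative behavior (not part of the original file): with the default
# pattern, a GSM8K-style response collapses to its final number, and responses
# without a match become the fallback string.
#
#   f = RegexFilter()  # pattern r'#### (\-?[0-9\.\,]+)', fallback '[invalid]'
#   f.apply([['Some reasoning... #### 42', 'no final answer given']], docs=[None])
#   # -> [['42', '[invalid]']]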
@register_filter('remove_whitespace')
class WhitespaceFilter(Filter):
    """Filter that removes whitespace from model responses."""

    def __init__(self) -> None:
        pass

    def apply(self, resps, docs):