    def __init__(self) -> None:
        pass

    def apply(self, resps, docs):
        # Select the most frequent response within each list of model responses.
        def select_majority(resp):
            counts = Counter(resp)
            vote = counts.most_common(1)[0][0]
            return vote

        return map(lambda r: [select_majority(r)], resps)
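# Hedged usage sketch (not part of the upstream file): the fragment above is
# the tail of a majority-vote selection filter, so `apply` reduces each list
# of sampled responses to its single most frequent element. The helper name
# below is an assumption for illustration, not a harness API.
from collections import Counter

def _majority_vote_demo(resps):
    # Same per-list logic as select_majority above.
    return [[Counter(r).most_common(1)[0][0]] for r in resps]

assert _majority_vote_demo([['A', 'B', 'A'], ['C', 'C', 'D']]) == [['A'], ['C']]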
# File: lm-evaluation-harness-main/lm_eval/filters/transformation.py
from lm_eval.api.filter import Filter
from lm_eval.api.registry import register_filter


@register_filter('lowercase')
class LowercaseFilter(Filter):
    def __init__(self) -> None:
        pass

    def apply(self, resps, docs):
        def filter_set(inst):
            return [resp.lower() for resp in inst]

        return [filter_set(resp) for resp in resps]
@register_filter('uppercase')
class UppercaseFilter(Filter):
    def __init__(self) -> None:
        pass

    def apply(self, resps, docs):
        def filter_set(inst):
            return [resp.upper() for resp in inst]

        return [filter_set(resp) for resp in resps]
@register_filter('map')
class MapFilter(Filter):
    def __init__(self, mapping_dict: dict = None, default_value=None) -> None:
        if mapping_dict is None:
            mapping_dict = {}
        assert isinstance(mapping_dict, dict), 'Provided mapping_dict is not a dictionary'
        self.mapping_dict = mapping_dict
        self.default_value = default_value

    def apply(self, resps, docs):
        def filter_set(inst):
            # Map each response through mapping_dict, falling back to default_value.
            return [self.mapping_dict.get(resp, self.default_value) for resp in inst]

        return [filter_set(resp) for resp in resps]
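# Hedged usage sketch (not part of the upstream file): each transformation
# filter maps over the nested per-document response lists. The instances and
# inputs below are illustrative assumptions, not documented harness entry
# points.
_lower = LowercaseFilter()
_mapped = MapFilter(mapping_dict={'yes': 1, 'no': 0}, default_value=-1)

assert _lower.apply([['YES', 'No']], docs=None) == [['yes', 'no']]
assert _mapped.apply([['yes', 'maybe']], docs=None) == [[1, -1]]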
# File: lm-evaluation-harness-main/lm_eval/loggers/evaluation_tracker.py
import json
import os
import re
import time
from collections import defaultdict
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path

from datasets import load_dataset
from datasets.utils.metadata import MetadataConfigs
from huggingface_hub import DatasetCard, DatasetCardData, HfApi, hf_hub_url
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status

from lm_eval.utils import (
    eval_logger,
    get_file_datetime,
    get_file_task_name,
    get_results_filenames,
    get_sample_results_filenames,
    handle_non_serializable,
    hash_string,
    sanitize_list,
    sanitize_model_name,
    sanitize_task_name,
)
@dataclass(init=False)
class GeneralConfigTracker:
    model_source: str = None
    model_name: str = None
    model_name_sanitized: str = None
    system_instruction: str = None
    system_instruction_sha: str = None
    fewshot_as_multiturn: bool = None
    chat_template: str = None
    chat_template_sha: str = None
    start_time: float = None
    end_time: float = None
    total_evaluation_time_seconds: str = None

    def __init__(self) -> None:
        self.start_time = time.perf_counter()

    @staticmethod
    def _get_model_name(model_args: str) -> str:
        def extract_model_name(model_args: str, key: str) -> str:
            args_after_key = model_args.split(key)[1]
            return args_after_key.split(',')[0]

        prefixes = ['peft=', 'delta=', 'pretrained=', 'model=', 'path=', 'engine=']
        for prefix in prefixes:
            if prefix in model_args:
                return extract_model_name(model_args, prefix)
        return ''

    def log_experiment_args(self, model_source: str, model_args: str, system_instruction: str, chat_template: str, fewshot_as_multiturn: bool) -> None:
        self.model_source = model_source
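# Hedged usage sketch (not part of the upstream file): _get_model_name scans
# model_args for the first recognized key prefix and returns its value up to
# the next comma, falling back to an empty string. The argument strings below
# are assumptions chosen only to illustrate the parsing.
assert GeneralConfigTracker._get_model_name('pretrained=my-org/my-model,dtype=float32') == 'my-org/my-model'
assert GeneralConfigTracker._get_model_name('unrecognized=foo') == ''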