# File: lm-evaluation-harness-main/lm_eval/evaluator.py
import itertools
import json
import logging
import random
import time
from collections import defaultdict
from typing import TYPE_CHECKING, List, Optional, Union

import numpy as np
import torch

import lm_eval.api.metrics
import lm_eval.api.registry
import lm_eval.api.task
import lm_eval.models
from lm_eval.caching.cache import delete_cache
from lm_eval.evaluator_utils import (
    consolidate_group_results,
    consolidate_results,
    get_sample_size,
    get_subtask_list,
    get_task_list,
    prepare_print_tasks,
    print_writeout,
    run_task_tests,
)
from lm_eval.loggers import EvaluationTracker
from lm_eval.loggers.utils import add_env_info, add_tokenizer_info, get_git_commit_hash
from lm_eval.tasks import TaskManager, get_task_dict
from lm_eval.utils import (
    eval_logger,
    handle_non_serializable,
    hash_string,
    positional_deprecated,
    simple_parse_args_string,
)

if TYPE_CHECKING:
    from lm_eval.api.model import LM
    from lm_eval.api.task import Task
@positional_deprecated
def simple_evaluate(
    model,
    model_args: Optional[Union[str, dict]] = None,
    tasks: Optional[List[Union[str, dict, object]]] = None,
    num_fewshot: Optional[int] = None,
    batch_size: Optional[Union[int, str]] = None,
    max_batch_size: Optional[int] = None,
    device: Optional[str] = None,
    use_cache: Optional[str] = None,
    cache_requests: bool = False,
    rewrite_requests_cache: bool = False,
    delete_requests_cache: bool = False,
    limit: Optional[Union[int, float]] = None,
    bootstrap_iters: int = 100000,
    check_integrity: bool = False,
    write_out: bool = False,
    log_samples: bool = True,
    evaluation_tracker: Optional[EvaluationTracker] = None,
    system_instruction: Optional[str] = None,
    apply_chat_template: bool = False,
    fewshot_as_multiturn: bool = False,
    gen_kwargs: Optional[str] = None,
    task_manager: Optional[TaskManager] = None,
    verbosity: str = 'INFO',
    predict_only: bool = False,
    random_seed: int = 0,
    numpy_random_seed: int = 1234,
    torch_random_seed: int = 1234,
    fewshot_random_seed: int = 1234,
):
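    """Instantiate an LM and a collection of tasks, then evaluate the LM on them.

    `model` is either a registered model name (configured via `model_args`) or a
    pre-initialized `lm_eval.api.model.LM` instance. `tasks` lists the task names
    or configs to run, resolved through `task_manager`. The remaining arguments
    override per-task settings (e.g. `num_fewshot`, `gen_kwargs`) or control
    seeding, batching, caching, and logging.
    """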
    eval_logger.setLevel(getattr(logging, f'{verbosity}'))
    start_date = time.time()

    if delete_requests_cache:
        eval_logger.info('Deleting requests cache...')
        delete_cache()
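    # Seed the Python, NumPy, and torch RNGs; passing None for a seed leaves that RNG untouched.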
    seed_message = []
    if random_seed is not None:
        seed_message.append(f'Setting random seed to {random_seed}')
        random.seed(random_seed)

    if numpy_random_seed is not None:
        seed_message.append(f'Setting numpy seed to {numpy_random_seed}')
        np.random.seed(numpy_random_seed)

    if torch_random_seed is not None:
        seed_message.append(f'Setting torch manual seed to {torch_random_seed}')
        torch.manual_seed(torch_random_seed)

    if seed_message:
        eval_logger.info(' | '.join(seed_message))
    if tasks is None:
        tasks = []
    if len(tasks) == 0:
        raise ValueError('No tasks specified, or no tasks found. Please verify the task names.')

    if gen_kwargs is not None:
        gen_kwargs = simple_parse_args_string(gen_kwargs)
        eval_logger.warning(
            'generation_kwargs specified through cli, these settings will update set parameters in yaml tasks. '
            "Ensure 'do_sample=True' for non-greedy decoding!"
        )
        if gen_kwargs == '':
            gen_kwargs = None
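    # `model` may be a registered model name (string) plus `model_args`, or an
    # already-constructed lm_eval.api.model.LM instance.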
    if isinstance(model, str):
        if model_args is None:
            eval_logger.warning('model_args not specified. Using defaults.')
            model_args = ''

        if isinstance(model_args, dict):
            eval_logger.info(f'Initializing {model} model, with arguments: {model_args}')
            lm = lm_eval.api.registry.get_model(model).create_from_arg_obj(
                model_args,
                {'batch_size': batch_size, 'max_batch_size': max_batch_size, 'device': device},
            )
        else:
            eval_logger.info(f'Initializing {model} model, with arguments: {simple_parse_args_string(model_args)}')
            lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
                model_args,
                {'batch_size': batch_size, 'max_batch_size': max_batch_size, 'device': device},
            )
    else:
        if not isinstance(model, lm_eval.api.model.LM):
            raise TypeError
        eval_logger.info('Using pre-initialized model')
        lm = model
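    # Optionally wrap the LM in a CachingLM so request results are stored in a
    # per-rank '.db' file and reused on subsequent runs.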
    if use_cache is not None:
        eval_logger.info(f"Using cache at {use_cache + '_rank' + str(lm.rank) + '.db'}")
        lm = lm_eval.api.model.CachingLM(lm, use_cache + '_rank' + str(lm.rank) + '.db')

    if task_manager is None:
        task_manager = TaskManager(verbosity)

    task_dict = get_task_dict(tasks, task_manager)
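    # Recursively walk the (possibly nested) task dict and apply CLI-level overrides:
    # generation kwargs, predict_only metric bypass, num_fewshot, and the fewshot seed.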
    def _adjust_config(task_dict):
        adjusted_task_dict = {}
        for task_name, task_obj in task_dict.items():
            if isinstance(task_obj, dict):
                adjusted_task_dict = {
                    **adjusted_task_dict,
                    **{task_name: _adjust_config(task_obj)},
                }
            else:
                if task_obj.get_config('output_type') == 'generate_until':
                    if gen_kwargs is not None:
                        task_obj.set_config(key='generation_kwargs', value=gen_kwargs, update=True)

                if predict_only:
                    eval_logger.info(f'Processing {task_name} in output-only mode. Metrics will not be calculated!')
                    task_obj.override_metric(metric_name='bypass')
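                # Override the task's num_fewshot with the CLI value, except when the task
                # pins num_fewshot to 0 in its own config; tasks with no default fall back to 0.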
                if num_fewshot is not None:
                    if (default_num_fewshot := task_obj.get_config('num_fewshot')) == 0:
                        eval_logger.info(f'num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored.')
                    else:
                        eval_logger.warning(f'Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}')
                        task_obj.set_config(key='num_fewshot', value=num_fewshot)
                elif (default_num_fewshot := task_obj.get_config('num_fewshot')) is None:
                    task_obj.set_config(key='num_fewshot', value=0)

                task_obj.set_fewshot_seed(seed=fewshot_random_seed)
                eval_logger.info(f'Setting fewshot random generator seed to {fewshot_random_seed}')

                adjusted_task_dict[task_name] = task_obj

        return adjusted_task_dict

    task_dict = _adjust_config(task_dict)