    if check_integrity:
        run_task_tests(task_list=tasks)
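
    # Record the experiment's configuration with the tracker, if one was provided.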
    if evaluation_tracker is not None:
        evaluation_tracker.general_config_tracker.log_experiment_args(
            model_source=model,
            model_args=model_args,
            system_instruction=system_instruction,
            chat_template=lm.chat_template if apply_chat_template else None,
            fewshot_as_multiturn=fewshot_as_multiturn,
        )
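
    # Run the evaluation loop itself; predict_only forces sample logging.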
    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
        cache_requests=cache_requests,
        rewrite_requests_cache=rewrite_requests_cache,
        bootstrap_iters=bootstrap_iters,
        write_out=write_out,
        log_samples=True if predict_only else log_samples,
        system_instruction=system_instruction,
        apply_chat_template=apply_chat_template,
        fewshot_as_multiturn=fewshot_as_multiturn,
        verbosity=verbosity,
    )
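
    # Only rank 0 assembles and returns the results dict; other ranks return None.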
    if lm.rank == 0:
        if isinstance(model, str):
            model_name = model
        elif hasattr(model, 'config') and hasattr(model.config, '_name_or_path'):
            model_name = model.config._name_or_path
        else:
            model_name = type(model).__name__
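
        # Attach run metadata for reproducibility: model identity, arguments,
        # batching, device, and the seeds used.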
        results['config'] = {
            'model': model_name,
            'model_args': model_args,
        }
        if isinstance(lm, lm_eval.models.huggingface.HFLM):
            results['config'].update(lm.get_model_info())
        results['config'].update(
            {
                'batch_size': batch_size,
                'batch_sizes': (
                    list(lm.batch_sizes.values()) if hasattr(lm, 'batch_sizes') else []
                ),
                'device': device,
                'use_cache': use_cache,
                'limit': limit,
                'bootstrap_iters': bootstrap_iters,
                'gen_kwargs': gen_kwargs,
                'random_seed': random_seed,
                'numpy_seed': numpy_random_seed,
                'torch_seed': torch_random_seed,
                'fewshot_seed': fewshot_random_seed,
            }
        )
        results['git_hash'] = get_git_commit_hash()
        results['date'] = start_date
        add_env_info(results)
        add_tokenizer_info(results, lm)
        return results
    else:
        return None


@positional_deprecated
def evaluate(
    lm: 'LM',
    task_dict,
    limit: Optional[int] = None,
    cache_requests: bool = False,
    rewrite_requests_cache: bool = False,
    bootstrap_iters: Optional[int] = 100000,
    write_out: bool = False,
    log_samples: bool = True,
    system_instruction: Optional[str] = None,
    apply_chat_template: bool = False,
    fewshot_as_multiturn: bool = False,
    verbosity: str = 'INFO',
):
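    """Evaluate an instantiated LM on a dict of tasks and return the results.

    :param lm: Language model to evaluate.
    :param task_dict: Dict mapping task names to Task instances.
    :param limit: If set, evaluate at most this many documents per task.
    :param bootstrap_iters: Iterations for bootstrap stderr estimates; 0 skips them.
    :param write_out: If True, print example documents for checking task formatting.
    :param log_samples: If True, retain per-document inputs, outputs, and metrics.
    """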
    eval_logger.setLevel(getattr(logging, verbosity))
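    # All Instances the model must produce output for, keyed by request type.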
    requests = defaultdict(list)
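    # Number of dummy requests to pad per request type so that every
    # distributed rank performs the same number of forward passes.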
    padding_requests = defaultdict(int)
    eval_tasks = get_task_list(task_dict)
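
    # Tasks whose only metric is 'bypass' produce nothing to aggregate,
    # so sample logging is mandatory for them.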
    if not log_samples:
        if not all(
            'bypass' not in getattr(task_output.task, '_metric_fn_list', {}).keys()
            for task_output in eval_tasks
        ):
            raise ValueError("log_samples must be True for 'bypass' metric-only tasks")
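
    # Build all requests for every task on this rank.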
    for task_output in eval_tasks:
        task: Task = task_output.task
        limit = get_sample_size(task, limit)
        task.build_all_requests(
            limit=limit,
            rank=lm.rank,
            world_size=lm.world_size,
            cache_requests=cache_requests,
            rewrite_requests_cache=rewrite_requests_cache,
            system_instruction=system_instruction,
            apply_chat_template=apply_chat_template,
            fewshot_as_multiturn=fewshot_as_multiturn,
            chat_template=getattr(lm, 'apply_chat_template') if apply_chat_template else None,
            tokenizer_name=getattr(lm, 'tokenizer_name', '') if apply_chat_template else '',
        )
        eval_logger.debug(
            f'Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}'
        )
        if write_out:
            print_writeout(task)
        for instance in task.instances:
            reqtype = instance.request_type
            requests[reqtype].append(instance)
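
        # In distributed runs, figure out how many dummy requests this rank
        # must add so that all ranks issue the same number of requests.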
        if lm.world_size > 1:
            instances_rnk = torch.tensor(len(task._instances), device=lm.device)
            gathered_item = (
                lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
            )
            reqtype = (
                'loglikelihood'
                if task.OUTPUT_TYPE == 'multiple_choice'
                else task.OUTPUT_TYPE
            )
            numpad = max(gathered_item) - gathered_item[lm.rank]
            padding_requests[reqtype] += numpad
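
    # Execute all requests, grouped by request type, by dispatching each group
    # to the matching LM method (e.g. lm.loglikelihood, lm.generate_until).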
    for reqtype, reqs in requests.items():
        eval_logger.info(f'Running {reqtype} requests')
        cloned_reqs = []
        for req in reqs:
            cloned_reqs.extend([req] * req.repeats)
        if lm.world_size > 1 and padding_requests[reqtype] > 0:
            for _ in range(padding_requests[reqtype]):
                cloned_reqs.extend([req] * req.repeats)
        resps = getattr(lm, reqtype)(cloned_reqs)
        for x, req in zip(resps, cloned_reqs):
            req.resps.append(x)
        if lm.world_size > 1:
            lm.accelerator.wait_for_everyone()
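
    # Postprocess: apply each task's filter pipelines to the raw responses,
    # then group instances by document and score them.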
    RANK = lm.rank
    WORLD_SIZE = lm.world_size
    for task_output in eval_tasks:
        task = task_output.task
        task.apply_filters()
        instances_by_doc_id = defaultdict(list)
        for instance in task.instances:
            instances_by_doc_id[instance.doc_id].append(instance)
        for instances in instances_by_doc_id.values():
            instances.sort(key=lambda x: x.idx)
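
        # Score each document once per filter pipeline.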
        for filter_key in task.instances[0].filtered_resps.keys():
            doc_iterator = task.doc_iterator(rank=RANK, limit=limit, world_size=WORLD_SIZE)
            for doc_id, doc in doc_iterator:
                requests = instances_by_doc_id[doc_id]
                metrics = task.process_results(
                    doc, [req.filtered_resps[filter_key] for req in requests]
                )
                if log_samples:
                    target = task.doc_to_target(doc)
                    example = {
                        'doc_id': doc_id,
                        'doc': doc,
                        'target': target,
                        'arguments': [req.args for req in requests],
                        'resps': [req.resps for req in requests],
                        'filtered_resps': [
                            req.filtered_resps[filter_key] for req in requests
                        ],
                        'doc_hash': hash_string(
                            json.dumps(
                                requests[0].doc,
                                indent=2,
                                default=handle_non_serializable,
                                ensure_ascii=False,
                            )
                        ),
                        'prompt_hash': hash_string(requests[0].arguments[0]),
                        'target_hash': hash_string(str(target)),
                    }
                    example.update(metrics)
                    task_output.logged_samples.append(example)
                for metric, value in metrics.items():
                    task_output.sample_metrics[(metric, filter_key)].append(value)
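
    # Gather per-sample logs and metrics from all ranks onto rank 0.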
    if WORLD_SIZE > 1:
        for task_output in eval_tasks:
            if log_samples:
                full_samples = [None] * WORLD_SIZE if RANK == 0 else None
                torch.distributed.gather_object(
                    obj=task_output.logged_samples,
                    object_gather_list=full_samples,
                    dst=0,
                )
                if RANK == 0:
                    task_output.logged_samples = list(
                        itertools.chain.from_iterable(full_samples)
                    )
            for metrics in task_output.sample_metrics:
                metric_list = [None] * WORLD_SIZE if RANK == 0 else None
                torch.distributed.gather_object(
                    obj=task_output.sample_metrics[metrics],
                    object_gather_list=metric_list,
                    dst=0,
                )
                if RANK == 0:
                    task_output.sample_metrics[metrics] = list(
                        itertools.chain.from_iterable(metric_list)
                    )
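
    # On rank 0, aggregate sample-level metrics (running bootstrap stderr
    # estimation) and consolidate results across tasks.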
    if RANK == 0:
        for task_output in eval_tasks:
            task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters)
        (
            results,
            samples,
            configs,
            versions,
            num_fewshot,
            higher_is_better,
        ) = consolidate_results(eval_tasks)