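# Imports this excerpt relies on; the helpers (eval_logger, hash_string,
# sanitize_list, sanitize_model_name, handle_non_serializable) are assumed
# to come from lm_eval.utils, as in the lm-evaluation-harness codebase.
import json
import os
import time
from dataclasses import asdict
from datetime import datetime
from pathlib import Path

from huggingface_hub import HfApi
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status

from lm_eval.utils import (
    eval_logger,
    handle_non_serializable,
    hash_string,
    sanitize_list,
    sanitize_model_name,
)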
    def log_experiment_args(self, model_source: str, model_args: str, system_instruction: str, chat_template: str, fewshot_as_multiturn: bool) -> None:
        """Logs the model and prompt parameters for the evaluation run."""
        self.model_source = model_source
        self.model_name = GeneralConfigTracker._get_model_name(model_args)
        self.model_name_sanitized = sanitize_model_name(self.model_name)
        self.system_instruction = system_instruction
        self.system_instruction_sha = hash_string(system_instruction) if system_instruction else None
        self.chat_template = chat_template
        self.chat_template_sha = hash_string(chat_template) if chat_template else None
        self.fewshot_as_multiturn = fewshot_as_multiturn

    def log_end_time(self) -> None:
        """Records the evaluation end time and the total run duration."""
        self.end_time = time.perf_counter()
        self.total_evaluation_time_seconds = str(self.end_time - self.start_time)

class EvaluationTracker:
    """Tracks evaluation metadata and saves results, optionally pushing them to the Hugging Face Hub."""

    def __init__(
        self,
        output_path: str = None,
        hub_results_org: str = '',
        hub_repo_name: str = '',
        details_repo_name: str = '',
        results_repo_name: str = '',
        push_results_to_hub: bool = False,
        push_samples_to_hub: bool = False,
        public_repo: bool = False,
        token: str = '',
        leaderboard_url: str = '',
        point_of_contact: str = '',
        gated: bool = False,
    ) -> None:
        self.general_config_tracker = GeneralConfigTracker()
        self.output_path = output_path
        self.push_results_to_hub = push_results_to_hub
        self.push_samples_to_hub = push_samples_to_hub
        self.public_repo = public_repo
        self.leaderboard_url = leaderboard_url
        self.point_of_contact = point_of_contact
        self.api = HfApi(token=token) if token else None
        self.gated_repo = gated

        if not self.api and (push_results_to_hub or push_samples_to_hub):
            raise ValueError(
                "Hugging Face token is not defined, but 'push_results_to_hub' or 'push_samples_to_hub' is set to True. "
                'Please provide a valid Hugging Face token by setting the HF_TOKEN environment variable.'
            )
        if self.api and hub_results_org == '' and (push_results_to_hub or push_samples_to_hub):
            hub_results_org = self.api.whoami()['name']
            eval_logger.warning(f"hub_results_org was not specified. Results will be pushed to '{hub_results_org}'.")

        if hub_repo_name == '':
            details_repo_name = details_repo_name if details_repo_name != '' else 'lm-eval-results'
            results_repo_name = results_repo_name if results_repo_name != '' else details_repo_name
        else:
            details_repo_name = hub_repo_name
            results_repo_name = hub_repo_name
            eval_logger.warning(
                'hub_repo_name was specified. Both details and results will be pushed to the same repository. '
                'Using hub_repo_name is no longer recommended; details_repo_name and results_repo_name should be used instead.'
            )
        self.details_repo = f'{hub_results_org}/{details_repo_name}'
        self.details_repo_private = f'{hub_results_org}/{details_repo_name}-private'
        self.results_repo = f'{hub_results_org}/{results_repo_name}'
        self.results_repo_private = f'{hub_results_org}/{results_repo_name}-private'
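
    # A minimal usage sketch (hypothetical values; in lm-eval the tracker is
    # normally constructed from CLI arguments rather than instantiated directly):
    #
    #   tracker = EvaluationTracker(
    #       output_path='./eval_out',
    #       push_results_to_hub=True,
    #       token=os.environ.get('HF_TOKEN', ''),
    #   )
    #   ...run the evaluation...
    #   tracker.save_results_aggregated(results=results, samples=samples)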
    def save_results_aggregated(self, results: dict, samples: dict) -> None:
        """Saves the aggregated results and run metadata, optionally pushing them to the Hub."""
        self.general_config_tracker.log_end_time()
        if self.output_path:
            try:
                eval_logger.info('Saving results aggregated')
                # Hash each task's samples so results can be traced back to the exact inputs.
                task_hashes = {}
                if samples:
                    for task_name, task_samples in samples.items():
                        sample_hashes = [s['doc_hash'] + s['prompt_hash'] + s['target_hash'] for s in task_samples]
                        task_hashes[task_name] = hash_string(''.join(sample_hashes))
                results.update({'task_hashes': task_hashes})
                results.update(asdict(self.general_config_tracker))
                dumped = json.dumps(results, indent=2, default=handle_non_serializable, ensure_ascii=False)

                path = Path(self.output_path).joinpath(self.general_config_tracker.model_name_sanitized)
                path.mkdir(parents=True, exist_ok=True)

                self.date_id = datetime.now().isoformat().replace(':', '-')
                file_results_aggregated = path.joinpath(f'results_{self.date_id}.json')
                with file_results_aggregated.open('w', encoding='utf-8') as f:
                    f.write(dumped)

                if self.api and self.push_results_to_hub:
                    repo_id = self.results_repo if self.public_repo else self.results_repo_private
                    self.api.create_repo(repo_id=repo_id, repo_type='dataset', private=not self.public_repo, exist_ok=True)
                    self.api.upload_file(
                        repo_id=repo_id,
                        path_or_fileobj=str(file_results_aggregated),
                        path_in_repo=os.path.join(self.general_config_tracker.model_name, f'results_{self.date_id}.json'),
                        repo_type='dataset',
                        commit_message=f'Adding aggregated results for {self.general_config_tracker.model_name}',
                    )
                    eval_logger.info(f'Successfully pushed aggregated results to the Hugging Face Hub. You can find them at: {repo_id}')
            except Exception as e:
                eval_logger.warning('Could not save results aggregated')
                eval_logger.info(repr(e))
        else:
            eval_logger.info('Output path not provided, skipping saving results aggregated')
    def save_results_samples(self, task_name: str, samples: list) -> None:
        """Saves the per-sample results for a task as JSON Lines, optionally pushing them to the Hub."""
        if self.output_path:
            try:
                eval_logger.info(f'Saving per-sample results for: {task_name}')
                path = Path(self.output_path).joinpath(self.general_config_tracker.model_name_sanitized)
                path.mkdir(parents=True, exist_ok=True)
                file_results_samples = path.joinpath(f'samples_{task_name}_{self.date_id}.jsonl')

                with open(file_results_samples, 'a', encoding='utf-8') as f:
                    for sample in samples:
                        # Flatten each request's argument tuple into a JSON-friendly nested dict.
                        arguments = {}
                        for i, arg in enumerate(sample['arguments']):
                            arguments[f'gen_args_{i}'] = {f'arg_{j}': tmp for j, tmp in enumerate(arg)}
                        sample['resps'] = sanitize_list(sample['resps'])
                        sample['filtered_resps'] = sanitize_list(sample['filtered_resps'])
                        sample['arguments'] = arguments
                        sample['target'] = str(sample['target'])
                        sample_dump = json.dumps(sample, default=handle_non_serializable, ensure_ascii=False) + '\n'
                        f.write(sample_dump)

                if self.api and self.push_samples_to_hub:
                    repo_id = self.details_repo if self.public_repo else self.details_repo_private
                    self.api.create_repo(repo_id=repo_id, repo_type='dataset', private=not self.public_repo, exist_ok=True)
                    try:
                        if self.gated_repo:
                            headers = build_hf_headers()
                            # Request gated ('auto') access on the details dataset via the Hub settings endpoint.
                            r = get_session().put(
                                url=f'https://huggingface.co/api/datasets/{repo_id}/settings',
                                headers=headers,
                                json={'gated': 'auto'},
                            )
                            hf_raise_for_status(r)