def _sanitize_results_dict(self):
    """Split the results dict into string-valued metrics (for wandb.summary)
    and numeric metrics flattened to 'task/metric' keys (for wandb.log)."""
    _results = copy.deepcopy(self.results.get('results', dict()))

    # Remove the ',none' filter suffix from metric names, e.g. 'acc,none' -> 'acc'
    tmp_results = copy.deepcopy(_results)
    for task_name in self.task_names:
        task_result = tmp_results.get(task_name, dict())
        for metric_name, metric_value in task_result.items():
            _metric_name, removed = remove_none_pattern(metric_name)
            if removed:
                _results[task_name][_metric_name] = metric_value
                _results[task_name].pop(metric_name)

    # Collect string-valued metrics; these don't render as charts, so they go to summary
    wandb_summary = {}
    for task in self.task_names:
        task_result = _results.get(task, dict())
        for metric_name, metric_value in task_result.items():
            if isinstance(metric_value, str):
                wandb_summary[f'{task}/{metric_name}'] = metric_value

    # Drop the string-valued metrics from the numeric results
    for summary_metric, summary_value in wandb_summary.items():
        _task, _summary_metric = summary_metric.split('/')
        _results[_task].pop(_summary_metric)

    # Flatten the per-task dicts into 'task/metric' keys
    tmp_results = copy.deepcopy(_results)
    for task_name, task_results in tmp_results.items():
        for metric_name, metric_value in task_results.items():
            _results[f'{task_name}/{metric_name}'] = metric_value
            _results[task_name].pop(metric_name)
    for task in self.task_names:
        _results.pop(task)

    return wandb_summary, _results
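
# `remove_none_pattern` is called above but not defined in this excerpt. A minimal
# sketch of what it is expected to do, assuming it strips a trailing ',none' filter
# suffix from a metric key and reports whether anything changed (the exact
# implementation may differ, and it would rely on `re` being imported):
#
#   def remove_none_pattern(input_string):
#       result = re.sub(r',none$', '', input_string)
#       return result, result != input_string
#
#   remove_none_pattern('acc,none')          # -> ('acc', True)
#   remove_none_pattern('acc,strict-match')  # -> ('acc,strict-match', False)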
def _log_results_as_table(self) -> None:
    """Generate and log the evaluation results as a W&B Table."""
    columns = ['Version', 'Filter', 'num_fewshot', 'Metric', 'Value', 'Stderr']

    def make_table(columns: List[str], key: str = 'results'):
        import wandb

        table = wandb.Table(columns=columns)
        results = copy.deepcopy(self.results)

        for k, dic in results.get(key).items():
            # Skip group aggregates when building the per-task table
            if k in self.group_names and not key == 'groups':
                continue
            version = results.get('versions').get(k)
            if version == 'N/A':
                version = None
            n = results.get('n-shot').get(k)

            for mf, v in dic.items():
                # Metric keys have the form 'metric,filter'
                m, _, f = mf.partition(',')
                if m.endswith('_stderr'):
                    continue
                if m == 'alias':
                    continue

                if m + '_stderr' + ',' + f in dic:
                    se = dic[m + '_stderr' + ',' + f]
                    if se != 'N/A':
                        se = '%.4f' % se
                    table.add_data(*[k, version, f, n, m, str(v), str(se)])
                else:
                    table.add_data(*[k, version, f, n, m, str(v), ''])

        return table

    # Log the per-task results table
    table = make_table(['Tasks'] + columns, 'results')
    self.run.log({'evaluation/eval_results': table})

    # Log the per-group results table, if groups were evaluated
    if 'groups' in self.results.keys():
        table = make_table(['Groups'] + columns, 'groups')
        self.run.log({'evaluation/group_eval_results': table})
def _log_results_as_artifact(self) -> None:
    import wandb

    dumped = json.dumps(
        self.results, indent=2, default=_handle_non_serializable, ensure_ascii=False
    )
    artifact = wandb.Artifact('results', type='eval_results')
    with artifact.new_file('results.json', mode='w', encoding='utf-8') as f:
        f.write(dumped)
    self.run.log_artifact(artifact)
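
# `_handle_non_serializable` (passed as `default=` to json.dumps above) is not shown in
# this excerpt. It is assumed to coerce values json cannot serialize on its own; a
# minimal sketch, assuming numpy is imported as `np`:
#
#   def _handle_non_serializable(o):
#       if isinstance(o, (np.int32, np.int64)):
#           return int(o)
#       if isinstance(o, set):
#           return list(o)
#       return str(o)
#
# json.dumps only invokes this callback for objects it cannot serialize itself.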
def log_eval_result(self) -> None:
    """Log evaluation results to W&B: config, summary, metrics, table, and artifact."""
    # Log the run configuration
    configs = self._get_config()
    self.run.config.update(configs)

    wandb_summary, self.wandb_results = self._sanitize_results_dict()
    # String-valued metrics go to wandb.run.summary, numeric metrics to wandb.log
    self.run.summary.update(wandb_summary)
    self.run.log(self.wandb_results)
    # Also log the results as a W&B Table and as a JSON artifact
    self._log_results_as_table()
    self._log_results_as_artifact()
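
# Usage sketch (hedged): the surrounding class is assumed to hold the evaluation
# `results` dict, `task_names`, `group_names`, and an initialized wandb run in
# `self.run`. The setup names below are illustrative, not taken from this excerpt:
#
#   logger = WandbLogger(...)                   # hypothetical constructor
#   logger.run = wandb.init(project='lm-eval')  # or however the run is attached
#   logger.results = results                    # dict produced by the evaluation
#   logger.log_eval_result()                    # config, summary, metrics, table, artifact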
def _generate_dataset(
    self, data: List[Dict[str, Any]], config: Dict[str, Any]
) -> pd.DataFrame:
    """Build a pandas DataFrame of per-sample evaluation data for a single task."""
    ids = [x['doc_id'] for x in data]
    labels = [x['target'] for x in data]
    instance = [''] * len(ids)
    resps = [''] * len(ids)
    filtered_resps = [''] * len(ids)
    model_outputs = {}

    metrics_list = config['metric_list']
    metrics = {}
    for metric in metrics_list:
        metric = metric.get('metric')
        if metric in ['word_perplexity', 'byte_perplexity', 'bits_per_byte']:
            # Perplexity-style metrics are (loglikelihood, n_bytes or n_words) pairs
            metrics[f'{metric}_loglikelihood'] = [x[metric][0] for x in data]
            if metric in ['byte_perplexity', 'bits_per_byte']:
                metrics[f'{metric}_bytes'] = [x[metric][1] for x in data]
            else:
                metrics[f'{metric}_words'] = [x[metric][1] for x in data]
        else:
            metrics[metric] = [x[metric] for x in data]

    if config['output_type'] == 'loglikelihood':
        # For loglikelihood tasks, arguments are (context, continuation) pairs
        instance = [x['arguments'][0][0] for x in data]
        labels = [x['arguments'][0][1] for x in data]