        target_delimiter = self.config.target_delimiter
        if apply_chat_template:
            target_delimiter = ''
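        # With multiple inputs, each choice is appended to the context and the single
        # target string is the continuation; otherwise each choice is scored as a
        # continuation of the shared context.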
        if self.multiple_input:
            cont = self.doc_to_target(doc)
            arguments = [(ctx + choice, f'{target_delimiter}{cont}') for choice in choices]
        else:
            arguments = [(ctx, f'{target_delimiter}{cont}') for cont in choices]

        request_list = [
            Instance(request_type='loglikelihood', doc=doc, arguments=arg, idx=i, **kwargs)
            for i, arg in enumerate(arguments)
        ]
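        # acc_mutual_info scores each choice by log P(choice|ctx) - log P(choice),
        # so unconditional (empty-context) loglikelihoods are also requested for every choice.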
        if 'acc_mutual_info' in self._metric_fn_list.keys():
            request_list.extend([
                Instance(request_type='loglikelihood', doc=doc, arguments=('', '{}'.format(choice)), idx=i, **kwargs)
                for i, choice in enumerate(choices)
            ])
        return request_list
    elif self.OUTPUT_TYPE == 'generate_until':
        arguments = (ctx, deepcopy(self.config.generation_kwargs))
        return Instance(
            request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
        )

def process_results(self, doc, results):
    if callable(self.config.process_results):
        return self.config.process_results(doc, results)

    result_dict = {}
    use_metric = list(self._metric_fn_list.keys())
    if self.OUTPUT_TYPE == 'loglikelihood':
        results = results[0]
        ll, is_greedy = results
        return {
            **({'perplexity': ll} if 'perplexity' in use_metric else {}),
            **({'acc': int(is_greedy)} if 'acc' in use_metric else {}),
        }
    elif self.OUTPUT_TYPE == 'loglikelihood_rolling':
        (loglikelihood,) = results
        _words = self.count_words(self.doc_to_target(doc))
        _bytes = self.count_bytes(self.doc_to_target(doc))
        return {
            **({'word_perplexity': (loglikelihood, _words)} if 'word_perplexity' in use_metric else {}),
            **({'byte_perplexity': (loglikelihood, _bytes)} if 'byte_perplexity' in use_metric else {}),
            **({'bits_per_byte': (loglikelihood, _bytes)} if 'bits_per_byte' in use_metric else {}),
        }
    elif self.OUTPUT_TYPE == 'multiple_choice':
        lls, is_greedy = zip(*results)
        choices = self.doc_to_choice(doc)
        completion_len = np.array([float(len(i)) for i in choices])
        if 2 * len(choices) == len(lls) and 'acc_mutual_info' in self._metric_fn_list.keys():
            # Conditional and unconditional loglikelihoods are interleaved:
            # odd positions hold the unconditional (empty-context) scores.
            lls_unconditional = lls[1::2]
            if len(lls_unconditional) != len(choices):
                raise ValueError('Mismatch between unconditional loglikelihoods and choices')
            lls = lls[::2]
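        # pred uses the raw loglikelihoods; pred_norm normalizes by the character
        # length of each choice (acc_norm is computed from pred_norm).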
        pred = np.argmax(lls)
        pred_norm = np.argmax(lls / completion_len)

        if self.multiple_input:
            gold = self.doc_to_text(doc)
        else:
            gold = self.doc_to_target(doc)
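        # gold may be an int index, a string matching one of the choices, or a list of
        # indices; out-of-range or unmatched labels are mapped to the sentinel -100.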
        gold_index_error = False
        if isinstance(gold, list):
            gold = [i if i < len(choices) else -100 for i in gold]
            if -100 in gold:
                gold_index_error = True
        else:
            if isinstance(gold, int):
                gold = gold if gold < len(choices) else -100
            elif isinstance(gold, str):
                gold = choices.index(gold) if gold in choices else -100
            if gold == -100:
                gold_index_error = True

        if gold_index_error:
            eval_logger.warning(
                f'Label index was not within range of available choices. Sample:\n\n{doc}\n\n'
            )
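        # When there are multiple gold targets, the prediction counts as correct
        # if it matches any of the gold indices.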
        if self.multiple_target:
            acc = 1.0 if pred in gold else 0.0
            acc_norm = 1.0 if pred_norm in gold else 0.0
            exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
        else:
            acc = 1.0 if pred == gold else 0.0
            acc_norm = 1.0 if pred_norm == gold else 0.0
            exact_match = int(is_greedy[gold]) if gold != -100 else 0
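        # Softmax over the loglikelihoods yields a probability per choice;
        # brier_score consumes this distribution together with the gold label.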
        prob_norm = utils.softmax(lls)

        result_dict = {
            **({'acc': acc} if 'acc' in use_metric else {}),
            **({'f1': (gold, pred)} if 'f1' in use_metric else {}),
            **({'mcc': (gold, pred)} if 'mcc' in use_metric else {}),
            **({'acc_norm': acc_norm} if 'acc_norm' in use_metric else {}),
            **({'exact_match': exact_match} if 'exact_match' in use_metric else {}),
            **({'brier_score': (gold, prob_norm)} if 'brier_score' in use_metric else {}),
        }

        if 'acc_mutual_info' in use_metric:
            lls_mutual_info = [ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)]
            acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
            result_dict['acc_mutual_info'] = acc_mutual_info
    elif self.OUTPUT_TYPE == 'generate_until':
        gold = self.doc_to_target(doc)
        result = results[0]
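        # When doc_to_choice is configured, doc_to_target is expected to return an
        # index into the choice list, so map it back to the answer string.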
        if self.config.doc_to_choice is not None:
            choices = self.doc_to_choice(doc)
            gold = choices[gold]
        elif self.multiple_target:
            gold = list(gold)
        elif type(gold) is not type(result):
            gold = type(result)(gold)
        for metric in self._metric_fn_list.keys():
            if self.multiple_target:
                scores = []
                if not isinstance(gold, list):
                    gold = [gold]
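                # For exact_match with multiple targets, compare the single prediction
                # against every reference and count a hit if any of them matches.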
                if metric == 'exact_match':
                    result = [result for _ in range(len(gold))]
                    scores = self._metric_fn_list[metric](
                        references=gold, predictions=result, **self._metric_fn_kwargs[metric]
                    )[metric]
                    result_score = 1.0 if scores > 0.0 else 0.0
                else:
                    for gold_option in gold:
                        try:
                            result_score = self._metric_fn_list[metric](
                                references=[gold_option], predictions=[result], **self._metric_fn_kwargs[metric]
                            )
                        except TypeError:
                            # Some metric callables take a single [reference, prediction]
                            # pair instead of keyword arguments.
                            result_score = self._metric_fn_list[metric]([gold_option, result])
                        if isinstance(result_score, dict):
                            result_score = result_score[metric]