resps = [f"log probability of continuation is {x['resps'][0][0][0]} " + '\n\n' + 'continuation will {} generated with greedy sampling'.format('not be' if not x['resps'][0][0][1] else 'be') for x in data] |
filtered_resps = [f"log probability of continuation is {x['filtered_resps'][0][0]} " + '\n\n' + 'continuation will {} generated with greedy sampling'.format('not be' if not x['filtered_resps'][0][1] else 'be') for x in data] |
elif config['output_type'] == 'multiple_choice': |
instance = [x['arguments'][0][0] for x in data] |
choices = ['\n'.join([f'{idx}. {y[1]}' for (idx, y) in enumerate(x['arguments'])]) for x in data] |
resps = [np.argmax([n[0][0] for n in x['resps']]) for x in data] |
filtered_resps = [np.argmax([n[0] for n in x['filtered_resps']]) for x in data] |
elif config['output_type'] == 'loglikelihood_rolling': |
instance = [x['arguments'][0][0] for x in data] |
resps = [x['resps'][0][0] for x in data] |
filtered_resps = [x['filtered_resps'][0] for x in data] |
elif config['output_type'] == 'generate_until': |
instance = [x['arguments'][0][0] for x in data] |
resps = [x['resps'][0][0] for x in data] |
filtered_resps = [x['filtered_resps'][0] for x in data] |
model_outputs['raw_predictions'] = resps |
model_outputs['filtered_predictions'] = filtered_resps |
df_data = {'id': ids, 'data': instance} |
if config['output_type'] == 'multiple_choice': |
df_data['choices'] = choices |
tmp_data = {'input_len': [len(x) for x in instance], 'labels': labels, 'output_type': config['output_type']} |
df_data.update(tmp_data) |
df_data.update(model_outputs) |
df_data.update(metrics) |
return pd.DataFrame(df_data) |
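    # --- Illustrative sketch, not part of the original source ---
    # A hedged example of the per-sample dict shape that _generate_dataset appears
    # to assume for a 'loglikelihood' task. Only 'arguments', 'resps', and
    # 'filtered_resps' are visible in the code above; the remaining keys and all
    # values below are assumptions made for illustration only:
    #
    #   example_sample = {
    #       'doc_id': 0,
    #       'target': ' Paris',
    #       'arguments': [('Q: Capital of France?\nA:', ' Paris')],
    #       'resps': [[(-1.23, True)]],         # [(logprob, is_greedy)] per request
    #       'filtered_resps': [(-1.23, True)],  # post-filter (logprob, is_greedy)
    #       'acc': 1.0,                         # example per-sample metric
    #   }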
    def _log_samples_as_artifact(self, data: List[Dict[str, Any]], task_name: str) -> None:
        import wandb

        dumped = json.dumps(data, indent=2, default=_handle_non_serializable, ensure_ascii=False)
        artifact = wandb.Artifact(f'{task_name}', type='samples_by_task')
        with artifact.new_file(f'{task_name}_eval_samples.json', mode='w', encoding='utf-8') as f:
            f.write(dumped)
        self.run.log_artifact(artifact)
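    # --- Illustrative sketch, not part of the original source ---
    # _handle_non_serializable is defined earlier in this file and is not shown in
    # this excerpt. A minimal json.dumps `default=` hook in the same spirit might
    # look like the following (assuming numpy is imported as np, as elsewhere in
    # this file); this is an assumption, not the actual helper:
    #
    #   def _handle_non_serializable_sketch(o):
    #       if isinstance(o, (np.int32, np.int64)):
    #           return int(o)
    #       if isinstance(o, set):
    #           return list(o)
    #       return str(o)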
    def log_eval_samples(self, samples: Dict[str, List[Dict[str, Any]]]) -> None:
        task_names: List[str] = [x for x in self.task_names if x not in self.group_names]

        # Split tasks into grouped and ungrouped ones so that grouped tasks can be
        # logged as a single concatenated table per group.
        ungrouped_tasks = []
        tasks_by_groups = {}
        for task_name in task_names:
            group_names = self.task_configs[task_name].get('group', None)
            if group_names:
                if isinstance(group_names, str):
                    group_names = [group_names]
                for group_name in group_names:
                    if not tasks_by_groups.get(group_name):
                        tasks_by_groups[group_name] = [task_name]
                    else:
                        tasks_by_groups[group_name].append(task_name)
            else:
                ungrouped_tasks.append(task_name)

        for task_name in ungrouped_tasks:
            eval_preds = samples[task_name]
            df = self._generate_dataset(eval_preds, self.task_configs.get(task_name))
            self.run.log({f'{task_name}_eval_results': df})
            self._log_samples_as_artifact(eval_preds, task_name)

        for group, grouped_tasks in tasks_by_groups.items():
            grouped_df = pd.DataFrame()
            for task_name in grouped_tasks:
                eval_preds = samples[task_name]
                df = self._generate_dataset(eval_preds, self.task_configs.get(task_name))
                df['group'] = group
                df['task'] = task_name
                grouped_df = pd.concat([grouped_df, df], ignore_index=True)
                self._log_samples_as_artifact(eval_preds, task_name)
            self.run.log({f'{group}_eval_results': grouped_df})
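    # --- Illustrative sketch, not part of the original source ---
    # Hedged usage of log_eval_samples: `wandb_logger` stands in for an initialized
    # instance of the (unshown) logger class, and the task name and sample list are
    # placeholders. The layout, a dict mapping task name to a list of per-sample
    # dicts, follows from the code above.
    #
    #   samples = {'example_task': [example_sample]}  # see the sketch after _generate_dataset
    #   wandb_logger.log_eval_samples(samples)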
# File: lm-evaluation-harness-main/lm_eval/models/__init__.py
from . import anthropic_llms, dummy, gguf, huggingface, mamba_lm, nemo_lm, neuralmagic, neuron_optimum, openai_completions, optimum_lm, textsynth, vllm_causallms

try:
    # If the optional hf_transfer package is installed, enable it so that
    # huggingface_hub uses accelerated downloads.
    import hf_transfer
    import huggingface_hub.constants

    huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True
except ImportError:
    pass
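# --- Illustrative note, not part of the original source ---
# An equivalent opt-in that does not touch huggingface_hub internals is to set the
# corresponding environment variable before huggingface_hub is imported, e.g.:
#
#   import os
#   os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'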
# File: lm-evaluation-harness-main/lm_eval/models/anthropic_llms.py
from typing import Any, List, Tuple

from tqdm import tqdm

from lm_eval import utils
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
from lm_eval.models.utils import retry_on_specific_exceptions

eval_logger = utils.eval_logger


def anthropic_completion(client, model: str, prompt: str, max_tokens_to_sample: int, temperature: float, stop: List[str], **kwargs: Any) -> str:
    try:
        import anthropic
    except ModuleNotFoundError:
        raise Exception(
            "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
            "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`"
        )

    def _exception_callback(e: Exception, sleep_time: float) -> None:
        eval_logger.warning(f'RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds')

    # Retry indefinitely on Anthropic rate limits, logging each backoff via the callback.
    @retry_on_specific_exceptions(on_exceptions=[anthropic.RateLimitError], max_retries=None, on_exception_callback=_exception_callback)
    def completion():
        response = client.completions.create(
            prompt=f'{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}',
            model=model,
            stop_sequences=[anthropic.HUMAN_PROMPT] + stop,
            max_tokens_to_sample=max_tokens_to_sample,
            temperature=temperature,
            **kwargs,
        )
        return response.completion

    return completion()
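# --- Illustrative sketch, not part of the original source ---
# A hedged example of driving anthropic_completion directly. The model name and
# prompt are placeholders, and anthropic.Anthropic() assumes ANTHROPIC_API_KEY is
# set in the environment.
def _example_anthropic_completion_call() -> str:
    import anthropic

    client = anthropic.Anthropic()  # legacy Completions endpoint is exposed on this client
    return anthropic_completion(
        client,
        model='claude-2.1',  # placeholder legacy completions model
        prompt='What is the tallest mountain on Earth?',
        max_tokens_to_sample=64,
        temperature=0.0,
        stop=[],
    )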
def anthropic_chat(client, model: str, prompt: str, max_tokens: int, temperature: float, stop: List[str], **kwargs: Any) -> str: