text
stringlengths 0
15.3k
|
---|
# Built-in prompt templates bundled with the harness, keyed first by
# category name and then by prompt name. Values are Jinja-style strings.
PROMPT_REGISTRY: Dict[str, Dict[str, str]] = {
    'qa-basic': {
        'question-newline-answer': 'Question: {{question}}\nAnswer:',
        'q-newline-a': 'Q: {{question}}\nA:',
    },
}
def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None):
    """Resolve a single prompt template from a prompt identifier.

    ``prompt_id`` has the form ``<category>:<prompt_name>``, where the
    category is ``promptsource``, a path containing ``.yaml``, or a key of
    ``PROMPT_REGISTRY``.

    :param prompt_id: ``<category>:<name>`` identifier of the prompt.
    :param dataset_name: Dataset the prompt applies to (promptsource only).
    :param subset_name: Optional dataset subset/config name.
    :return: A promptsource template, a ``PromptString``, or a registry
        template string, depending on the category.
    :raises ValueError: If the identifier is malformed (``str.split`` raises
        here for zero or more than one ``:``), the dataset is unknown to
        promptsource, or the prompt name cannot be found.
    """
    (category_name, prompt_name) = prompt_id.split(':')
    if subset_name is None:
        dataset_full_name = dataset_name
    else:
        dataset_full_name = f'{dataset_name}-{subset_name}'
    eval_logger.info(f'Loading prompt from {category_name} for {dataset_full_name}')
    if category_name == 'promptsource':
        try:
            from promptsource.templates import DatasetTemplates
        except ModuleNotFoundError as err:
            # BUG FIX: the original passed two comma-separated strings to
            # Exception(), which produced a tuple of messages instead of one
            # readable message. Join them into a single string and chain the
            # original import error for debuggability.
            raise Exception(
                'Tried to load a Promptsource template, but promptsource is not installed, '
                'please install promptsource via pip install lm-eval[promptsource] '
                'or pip install -e .[promptsource]'
            ) from err
        try:
            if subset_name is None:
                prompts = DatasetTemplates(dataset_name=dataset_name)
            else:
                prompts = DatasetTemplates(dataset_name=dataset_name, subset_name=subset_name)
        except Exception as err:
            raise ValueError(f'{dataset_name} and {subset_name} not found') from err
        if prompt_name in prompts.all_template_names:
            return prompts[prompt_name]
        else:
            raise ValueError(f'{prompt_name} not in prompt list {prompts.all_template_names}')
    elif '.yaml' in category_name:
        import yaml
        # NOTE(review): full_load resolves standard YAML tags only; the path
        # comes from task configuration and is assumed trusted.
        with open(category_name, 'rb') as file:
            prompt_yaml_file = yaml.full_load(file)
        prompt_string = prompt_yaml_file['prompts'][prompt_name]
        return PromptString(prompt_string)
    else:
        try:
            return PROMPT_REGISTRY[category_name][prompt_name]
        except KeyError as err:
            # BUG FIX: the original caught every Exception here and blamed a
            # malformed `:` separator — but the split already succeeded above,
            # so the only failure at this point is a missing registry entry.
            raise ValueError(
                f'Prompt `{prompt_id}` not found in prompt registry'
            ) from err
def load_prompt_list(use_prompt: str, dataset_name=None, subset_name=None, yaml_path=None, **kwargs):
    """Expand a (possibly wildcarded) prompt id into a list of full ids.

    :param use_prompt: ``<category>:<pattern>`` — the pattern part is matched
        against the available template names via ``utils.pattern_match``.
    :param dataset_name: Dataset name (promptsource categories only).
    :param subset_name: Optional dataset subset/config name.
    :param yaml_path: Directory used to resolve a relative ``.yaml`` category.
    :param kwargs: Ignored; accepted for call-site compatibility.
    :return: List of ``<category>:<prompt_name>`` strings.
    :raises ValueError: If the category is neither ``promptsource`` nor a
        ``.yaml`` path.
    """
    (category_name, prompt_name) = use_prompt.split(':')
    if category_name == 'promptsource':
        from promptsource.templates import DatasetTemplates
        if subset_name is None:
            prompts = DatasetTemplates(dataset_name=dataset_name)
        else:
            prompts = DatasetTemplates(dataset_name=dataset_name, subset_name=subset_name)
        prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)
    elif '.yaml' in category_name:
        import yaml
        if yaml_path is not None:
            # Resolve the yaml file relative to the config that referenced it.
            category_name = os.path.realpath(os.path.join(yaml_path, category_name))
        with open(category_name, 'rb') as file:
            prompt_yaml_file = yaml.full_load(file)
        prompt_list = utils.pattern_match(prompt_name, prompt_yaml_file['prompts'].keys())
    else:
        # BUG FIX: the original fell through with `prompt_list` unbound for
        # unknown categories, surfacing as an opaque NameError on the return
        # line below. Fail early with an explicit, actionable error instead.
        raise ValueError(
            f'Unknown prompt category `{category_name}` in `{use_prompt}`; '
            'expected `promptsource` or a `.yaml` file path'
        )
    return [':'.join([category_name, prompt]) for prompt in prompt_list]
class PromptString:
    """Thin wrapper over a prompt dict holding ``doc_to_text`` and
    ``doc_to_target`` template strings, applied to documents on demand."""

    def __init__(self, prompt_string):
        # Mapping of template-role name -> raw template string.
        self.prompt_string = prompt_string

    def apply(self, doc):
        """Render the text and target templates against ``doc``.

        :param doc: Document mapping supplying the template variables.
        :return: Two-element list ``[rendered_text, rendered_target]``.
        :raises Exception: If the prompt declares ``doc_to_choice``, which
            this wrapper does not support yet.
        """
        # Read both templates first, then reject unsupported prompts —
        # preserving this order keeps missing-key errors surfacing the same
        # way they always have.
        text_template = self.prompt_string['doc_to_text']
        target_template = self.prompt_string['doc_to_target']
        if 'doc_to_choice' in self.prompt_string:
            raise Exception('Not yet implemented to accept doc_to_choice')
        return [
            utils.apply_template(text_template, doc),
            utils.apply_template(target_template, doc),
        ]
# File: lm-evaluation-harness-main/lm_eval/tasks/__init__.py |
import collections |
import inspect |
import logging |
import os |
from functools import partial |
from typing import Dict, List, Mapping, Optional, Union |
from lm_eval import utils |
from lm_eval.api.group import ConfigurableGroup, GroupConfig |
from lm_eval.api.task import ConfigurableTask, Task |
from lm_eval.evaluator_utils import get_subtask_list |
# Config keys that are only valid on a group config (not on a task config),
# derived once from the default GroupConfig's serialized form.
GROUP_ONLY_KEYS = list(GroupConfig().to_dict())
class TaskManager: |
def __init__(self, verbosity='INFO', include_path: Optional[Union[str, List]]=None, include_defaults: bool=True) -> None:
    """Build the task index and the per-type name lists.

    :param verbosity: Logging level name applied to the shared eval logger.
    :param include_path: Extra path(s) containing task YAML configs.
    :param include_defaults: Whether to scan the bundled task directory.
    """
    self.verbosity = verbosity
    self.include_path = include_path
    self.logger = utils.eval_logger
    self.logger.setLevel(getattr(logging, f'{verbosity}'))
    self._task_index = self.initialize_tasks(include_path=include_path, include_defaults=include_defaults)
    self._all_tasks = sorted(self._task_index)

    # Partition the index once by entry type so later lookups are cheap.
    def _names_of(kind):
        return sorted(name for name in self._all_tasks if self._task_index[name]['type'] == kind)

    self._all_groups = _names_of('group')
    self._all_subtasks = _names_of('task')
    self._all_tags = _names_of('tag')
    self.task_group_map = collections.defaultdict(list)
def initialize_tasks(self, include_path: Optional[Union[str, List]]=None, include_defaults: bool=True): |
if include_defaults: |
all_paths = [os.path.dirname(os.path.abspath(__file__)) + '/'] |
else: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.