import ast
import copy
import time
import os
import traceback
# other modules also import these names from this file (re-exported here)
from enums import PromptType, gpt_token_mapping, anthropic_mapping, google_mapping, mistralai_mapping, groq_mapping, \
noop_prompt_type, unknown_prompt_type, user_prompt_for_fake_system_prompt0, template_prompt_type, empty_prompt_type, \
extra_stop_token_ids # keep single line
from prompter_utils import get_use_chat_template
from utils import FakeTokenizer
from stopping import update_terminate_responses
non_hf_types = ['gpt4all_llama', 'llama', 'gptj']
prompt_type_to_model_name = {
noop_prompt_type: [
'EleutherAI/gpt-j-6B',
'EleutherAI/pythia-6.9b',
'EleutherAI/pythia-12b',
'EleutherAI/pythia-12b-deduped',
'EleutherAI/gpt-neox-20b',
'openlm-research/open_llama_7b_700bt_preview',
'decapoda-research/llama-7b-hf',
'decapoda-research/llama-13b-hf',
'decapoda-research/llama-30b-hf',
'decapoda-research/llama-65b-hf',
'facebook/mbart-large-50-many-to-many-mmt',
'philschmid/bart-large-cnn-samsum',
'philschmid/flan-t5-base-samsum',
'gpt2',
'distilgpt2',
'mosaicml/mpt-7b-storywriter',
'tiiuae/falcon-7b',
'tiiuae/falcon-40b',
'tiiuae/falcon-180B',
'meta-llama/Llama-2-7b',
'meta-llama/Llama-2-13b',
'meta-llama/Llama-2-70b',
'h2oai/h2ogpt-4096-llama2-7b',
'h2oai/h2ogpt-4096-llama2-13b',
'h2oai/h2ogpt-4096-llama2-70b',
'h2oai/h2ogpt-16k-codellama-7b',
'h2oai/h2ogpt-16k-codellama-13b',
'h2oai/h2ogpt-16k-codellama-34b',
'h2oai/h2ogpt-16k-codellama-7b-python',
'h2oai/h2ogpt-16k-codellama-13b-python',
'h2oai/h2ogpt-16k-codellama-34b-python',
'h2oai/h2ogpt-32k-codellama-34b-python',
'mistralai/Mistral-7B-v0.1',
'mistralai/Mixtral-8x7B-v0.1',
],
'gptj': ['gptj', 'gpt4all_llama'],
'prompt_answer': [
'h2oai/h2ogpt-gm-oasst1-en-1024-20b',
'h2oai/h2ogpt-gm-oasst1-en-1024-12b',
'h2oai/h2ogpt-gm-oasst1-multilang-1024-20b',
'h2oai/h2ogpt-gm-oasst1-multilang-2048-falcon-7b',
'h2oai/h2ogpt-gm-oasst1-multilang-2048-falcon-7b-v2',
'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b',
'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v2',
'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v2',
'h2oai/h2ogpt-gm-oasst1-en-xgen-7b-8k',
'h2oai/h2ogpt-gm-oasst1-multilang-xgen-7b-8k',
'TheBloke/h2ogpt-gm-oasst1-en-2048-falcon-40b-v2-GPTQ',
],
'prompt_answer_openllama': [
'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt',
'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2',
'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-700bt',
'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b',
'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
],
'instruct': ['TheBloke/llama-30b-supercot-SuperHOT-8K-fp16', 'TheBloke/Nous-Hermes-13B-GPTQ'],
# https://huggingface.co/TheBloke/llama-30b-supercot-SuperHOT-8K-fp16#prompting
'instruct_with_end': ['databricks/dolly-v2-12b'],
'quality': [],
'human_bot': [
'h2oai/h2ogpt-oasst1-512-12b',
'h2oai/h2ogpt-oasst1-512-20b',
'h2oai/h2ogpt-oig-oasst1-256-6_9b',
'h2oai/h2ogpt-oig-oasst1-512-6_9b',
'h2oai/h2ogpt-oig-oasst1-256-6.9b', # legacy
'h2oai/h2ogpt-oig-oasst1-512-6.9b', # legacy
'h2oai/h2ogpt-research-oasst1-512-30b',
'h2oai/h2ogpt-research-oasst1-llama-65b',
'h2oai/h2ogpt-oasst1-falcon-40b',
'h2oai/h2ogpt-oig-oasst1-falcon-40b',
'llmware/dragon-mistral-7b-v0', # https://huggingface.co/llmware/dragon-mistral-7b-v0
],
'dai_faq': [],
'summarize': [],
'simple_instruct': ['t5-small', 't5-large', 'google/flan-t5', 'google/flan-t5-xxl', 'google/flan-ul2'],
'instruct_vicuna': ['AlekseyKorshuk/vicuna-7b', 'TheBloke/stable-vicuna-13B-HF', 'junelee/wizard-vicuna-13b'],
'human_bot_orig': ['togethercomputer/GPT-NeoXT-Chat-Base-20B'],
"open_assistant": ['OpenAssistant/oasst-sft-7-llama-30b-xor', 'oasst-sft-7-llama-30b'],
"wizard_lm": ['ehartford/WizardLM-7B-Uncensored', 'ehartford/WizardLM-13B-Uncensored'],
"wizard_mega": ['openaccess-ai-collective/wizard-mega-13b'],
"instruct_simple": ['JosephusCheung/Guanaco'],
"wizard_vicuna": ['ehartford/Wizard-Vicuna-13B-Uncensored'],
# "wizard2": [],
"mptinstruct": ['mosaicml/mpt-30b-instruct', 'mosaicml/mpt-7b-instruct', 'mosaicml/mpt-30b-instruct'],
"mptchat": ['mosaicml/mpt-7b-chat', 'mosaicml/mpt-30b-chat', 'TheBloke/mpt-30B-chat-GGML',
'TheBloke/Nous-Hermes-2-Mixtral-8x7B-DPO-AWQ',
'TheBloke/dolphin-2.7-mixtral-8x7b-AWQ',
],
"orca2": ['TheBloke/Orca-2-13B-GGUF', 'microsoft/Orca-2-13b'],
"vicuna11": ['lmsys/vicuna-33b-v1.3',
'lmsys/vicuna-7b-v1.5',
'lmsys/vicuna-13b-v1.5', # https://huggingface.co/lmsys/vicuna-13b-v1.5/discussions/6/files
'NousResearch/Nous-Capybara-34B',
],
"vicuna11nosys": ['lmsys/vicuna-13b-v1.5-16k',
# system prompt doesn't work; no evidence from the model card that it was trained with one.
],
"one_shot": ['lmsys/fastchat-t5-3b-v1.0', 'mistral-community/Mixtral-8x22B-v0.1'],
"falcon": ['tiiuae/falcon-40b-instruct', 'tiiuae/falcon-7b-instruct'],
"llama2": [
'meta-llama/Llama-2-7b-chat-hf',
'meta-llama/Llama-2-13b-chat-hf',
'meta-llama/Llama-2-34b-chat-hf',
'meta-llama/Llama-2-70b-chat-hf',
'h2oai/h2ogpt-oasst1-4096-llama2-7b',
'h2oai/h2ogpt-oasst1-4096-llama2-13b',
'h2oai/h2ogpt-oasst1-4096-llama2-70b',
# 'llama',  # no longer map every llama model to the llama2 prompt; too many are not llama2, and auto-detection then becomes confusing
'TheBloke/Llama-2-7b-Chat-GPTQ',
'TheBloke/Llama-2-7b-chat-fp16',
'TheBloke/Llama-2-13b-chat-fp16',
'TheBloke/Llama-2-70b-chat-fp16',
'h2oai/h2ogpt-4096-llama2-7b-chat',
'h2oai/h2ogpt-4096-llama2-13b-chat',
'h2oai/h2ogpt-4096-llama2-70b-chat',
'h2oai/h2ogpt-16k-codellama-7b-instruct',
'h2oai/h2ogpt-16k-codellama-13b-instruct',
'h2oai/h2ogpt-16k-codellama-34b-instruct',
'h2oai/h2ogpt-32k-codellama-34b-instruct',
'TheBloke/Llama-2-70B-chat-AWQ',
'h2oai/h2ogpt-4096-llama2-70b-chat-4bit',
'TheBloke/Llama-2-13B-chat-AWQ',
'Yukang/LongAlpaca-70B', # or can be instruct
'TheBloke/Llama-2-7B-Chat-GGUF',
'namespace-Pt/activation-beacon-llama2-7b-chat',
'abacusai/Smaug-72B-v0.1',
],
"mistral": ['mistralai/Mistral-7B-Instruct-v0.1', 'TheBloke/Mistral-7B-Instruct-v0.1-GGUF',
'mistralai/Mistral-7B-Instruct-v0.2', 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF',
],
"mixtral": ['mistralai/Mixtral-8x7B-Instruct-v0.1', 'TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF',
'TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ', 'TheBloke/Mixtral-8x7B-Instruct-v0.1-AWQ',
'ybelkada/Mixtral-8x7B-Instruct-v0.1-AWQ'],
"mixtralnosys": [],
"zephyr": ['HuggingFaceH4/zephyr-7b-alpha', 'HuggingFaceH4/zephyr-7b-beta', 'TheBloke/zephyr-7B-beta-GGUF',
'TheBloke/zephyr-7B-beta-AWQ', 'zephyr-7b-beta.Q5_K_M.gguf'],
"beluga": ['stabilityai/StableBeluga2', 'psmathur/orca_mini_v3_7b'],
"wizard3nospace": ['WizardLM/WizardLM-13B-V1.2'],
"falcon_chat": ['tiiuae/falcon-180B-chat'],
"xwin": ['Xwin-LM/Xwin-LM-13B-V0.1', 'TheBloke/Xwin-LM-13B-V0.1-GPTQ', 'TheBloke/Xwin-LM-13B-v0.2-GPTQ',
'Xwin-LM/Xwin-LM-70B-V0.1'],
"xwincoder": ['Xwin-LM/XwinCoder-7B', 'Xwin-LM/XwinCoder-13B', 'Xwin-LM/XwinCoder-34B'],
"xwinmath": ["Xwin-LM/Xwin-Math-7B-V1.0", "Xwin-LM/Xwin-Math-70B-V1.0", "Xwin-LM/Xwin-Math-13B-V1.0"],
"mistrallite": ['amazon/MistralLite'],
"aquila": ['h2oai/h2ogpt-16k-aquilachat2-34b', 'BAAI/AquilaChat2-34B-16K', 'BAAI/AquilaChat2-34B-16k',
'BAAI/AquilaChat2-7B-16K'],
"aquila_legacy": ['BAAI/AquilaChat2-34B'],
"aquila_v1": ['BAAI/AquilaChat2-7B'],
"mistralgerman": ['TheBloke/em_german_leo_mistral-GPTQ'],
"deepseek_coder": ['deepseek-ai/deepseek-coder-1.3b-instruct',
'deepseek-ai/deepseek-coder-6.7b-instruct',
'deepseek-ai/deepseek-coder-33b-instruct',
],
"open_chat": ['openchat/openchat_3.5', 'TheBloke/openchat_3.5-GPTQ', 'TheBloke/openchat_3.5-GGUF',
'TheBloke/openchat_3.5-AWQ', 'TheBloke/openchat_3.5-16k-AWQ',
'openchat_3.5.Q5_K_M.gguf', 'NurtureAI/openchat_3.5-16k'],
"open_chat_correct": ['berkeley-nest/Starling-LM-7B-alpha', 'openchat/openchat-3.5-1210',
'openchat/openchat_3.5', 'openchat/openchat_v3.2_super',
'TheBloke/openchat-3.5-1210-AWQ',
], # can be any from open_chat list, by using this prompt
"open_chat_code": [], # can be any from open_chat list, by using this prompt
"open_chat_math": [], # can be any from open_chat list, by using this prompt
"jais": ['core42/jais-30b-chat-v1', 'core42/jais-13b-chat'],
"yi": ['01-ai/Yi-34B-Chat', 'TheBloke/Yi-34B-Chat-AWQ'],
"docsgpt": ['Arc53/docsgpt-7b-mistral'],
"orion": ['OrionStarAI/Orion-14B-Chat', 'OrionStarAI/Orion-14B-LongChat', 'OrionStarAI/Orion-14B-Chat-RAG'],
"sciphi": ['SciPhi/SciPhi-Self-RAG-Mistral-7B-32k'],
# could be plain, but default is correct prompt_type for default TheBloke model ggml-wizardLM-7B.q4_2.bin
"beacon": [],
"beacon2": [],
# endpoint handles prompting, but we need chat history generation in some sensible way
"llava": ['liuhaotian/llava-v1.6-34b',
'liuhaotian/llava-v1.6-mistral-7b',
'liuhaotian/llava-v1.6-vicuna-13b',
'liuhaotian/llava-v1.6-vicuna-7b',
'liuhaotian/llava-v1.5-13b',
'liuhaotian/llava-v1.5-7b',
'NousResearch/Nous-Hermes-2-Vision', # different worker, that handles prompting itself too
],
"danube": ['h2oai/h2o-danube-1.8b-chat'],
"gemma": ['gg-hf/gemma-2b-it', 'gg-hf/gemma-7b-it', 'google/gemma-2b-it', 'google/gemma-7b-it'],
"qwen": ['Qwen/Qwen1.5-7B-Chat-GPTQ-Int8',
'Qwen/Qwen1.5-7B-Chat-GPTQ-Int4',
'Qwen/Qwen1.5-7B-Chat-AWQ',
'Qwen/Qwen1.5-7B-Chat',
'Qwen/Qwen1.5-72B-Chat-GPTQ-Int8',
'Qwen/Qwen1.5-72B-Chat-GPTQ-Int4',
'Qwen/Qwen1.5-72B-Chat-AWQ',
'Qwen/Qwen1.5-72B-Chat',
'Qwen/Qwen1.5-4B-Chat-GPTQ-Int8',
'Qwen/Qwen1.5-4B-Chat-GPTQ-Int4',
'Qwen/Qwen1.5-4B-Chat-AWQ',
'Qwen/Qwen1.5-4B-Chat',
'Qwen/Qwen1.5-14B-Chat-GPTQ-Int8',
'Qwen/Qwen1.5-14B-Chat-GPTQ-Int4',
'Qwen/Qwen1.5-14B-Chat-AWQ',
'Qwen/Qwen1.5-14B-Chat',
'Qwen/Qwen1.5-1.8B-Chat-GPTQ-Int8',
'Qwen/Qwen1.5-1.8B-Chat-GPTQ-Int4',
'Qwen/Qwen1.5-1.8B-Chat-AWQ',
'Qwen/Qwen1.5-1.8B-Chat',
'Qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8',
'Qwen/Qwen1.5-0.5B-Chat-GPTQ-Int4',
'Qwen/Qwen1.5-0.5B-Chat-AWQ',
'Qwen/Qwen1.5-0.5B-Chat',
'Qwen/Qwen1.5-72B-Chat-GGUF',
'Qwen/Qwen1.5-14B-Chat-GGUF',
'Qwen/Qwen1.5-7B-Chat-GGUF',
'Qwen/Qwen1.5-4B-Chat-GGUF',
'Qwen/Qwen1.5-1.8B-Chat-GGUF',
'Qwen/Qwen1.5-0.5B-Chat-GGUF',
],
"sealion": ['aisingapore/sea-lion-7b-instruct'],
"aya": ["CohereForAI/aya-101"],
"idefics2": ["HuggingFaceM4/idefics2-8b-chatty", "HuggingFaceM4/idefics2-8b-chat"],
# don't actually add these here, else use_chat_template wouldn't work correctly for LLM mode
# 'cohere_grounded': ["CohereForAI/c4ai-command-r-v01", "CohereForAI/c4ai-command-r-plus"],
}
anthropic_gpts = sorted(anthropic_mapping.keys())
prompt_type_to_model_name['anthropic'] = anthropic_gpts
google_gpts = sorted(google_mapping.keys())
prompt_type_to_model_name['google'] = google_gpts
mistralai_gpts = sorted(mistralai_mapping.keys())
prompt_type_to_model_name['mistralai'] = mistralai_gpts
groq_gpts = sorted(groq_mapping.keys())
prompt_type_to_model_name['groq'] = groq_gpts
model_names_curated_big = ['Yukang/LongAlpaca-70B',
'lmsys/vicuna-13b-v1.5-16k',
'h2oai/h2ogpt-32k-codellama-34b-instruct']
model_names_curated = ['TheBloke/Xwin-LM-13B-V0.1-GPTQ',
'TheBloke/Llama-2-7B-Chat-GGUF',
'HuggingFaceH4/zephyr-7b-beta',
'TheBloke/zephyr-7B-beta-GGUF',
'TheBloke/zephyr-7B-beta-AWQ'] + model_names_curated_big
openai_gpts = list(gpt_token_mapping.keys())
prompt_type_to_model_name.update({
"openai": ["text-davinci-003", "text-curie-001", "text-babbage-001", "text-ada-001"],
"openai_chat": openai_gpts,
})
model_names_curated += ['gpt-3.5-turbo']
inv_prompt_type_to_model_name = {v.strip(): k for k, l in prompt_type_to_model_name.items() for v in l}
inv_prompt_type_to_model_lower = {v.strip().lower(): k for k, l in prompt_type_to_model_name.items() for v in l}
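# Example lookup (illustrative only): inv_prompt_type_to_model_lower.get('meta-llama/llama-2-7b-chat-hf')
# returns 'llama2', because that model name is listed under the "llama2" key above.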
prompt_types_strings = []
for p in PromptType:
prompt_types_strings.extend([p.name])
prompt_types = []
for p in PromptType:
prompt_types.extend([p.name, p.value, str(p.value)])
def get_prompt(prompt_type, prompt_dict, context, reduced, making_context, return_dict=False,
system_prompt=None, histi=-1):
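# Summary of inferred behavior (descriptive only): builds the prompt-template pieces
# (promptA/promptB, PreInstruct, PreInput, PreResponse, chat separators, terminate_response,
# humanstr/botstr) for the given prompt_type.  prompt_dict is only consulted for the
# 'custom' type; reduced drops the system/pre-prompt for later turns; making_context
# adjusts PreResponse so history looks as if the LLM generated it; histi is the index of
# the history turn being rendered (0 for the first turn).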
prompt_dict_error = ''
generates_leading_space = False
can_handle_system_prompt = False
if prompt_type == PromptType.custom.name and not isinstance(prompt_dict, dict):
try:
prompt_dict = ast.literal_eval(prompt_dict)
except BaseException as e:
prompt_dict_error = str(e)
if prompt_dict_error:
promptA = None
promptB = None
PreInstruct = None
PreInput = ''
PreResponse = ''
terminate_response = None
chat_sep = ''
chat_turn_sep = ''
humanstr = ''
botstr = ''
generates_leading_space = False
elif prompt_type in [PromptType.custom.value, str(PromptType.custom.value),
PromptType.custom.name]:
promptA = prompt_dict.get('promptA', '')
promptB = prompt_dict.get('promptB', '')
PreInstruct = prompt_dict.get('PreInstruct', '')
PreInput = prompt_dict.get('PreInput', '')
PreResponse = prompt_dict.get('PreResponse', '')
terminate_response = prompt_dict.get('terminate_response', None)
chat_sep = prompt_dict.get('chat_sep', '\n')
chat_turn_sep = prompt_dict.get('chat_turn_sep', '\n')
humanstr = prompt_dict.get('humanstr', '')
botstr = prompt_dict.get('botstr', '')
elif prompt_type in [PromptType.plain.value, str(PromptType.plain.value),
PromptType.plain.name]:
promptA = promptB = PreInstruct = PreInput = PreResponse = None
terminate_response = []
chat_sep = chat_turn_sep = '\n'
# plain should have None for human/bot, so nothing truncated out, not '' that would truncate after first token
humanstr = None
botstr = None
elif prompt_type in [PromptType.unknown.value, str(PromptType.unknown.value),
PromptType.unknown.name]:
promptA = promptB = PreInstruct = PreInput = PreResponse = None
terminate_response = []
chat_sep = chat_turn_sep = '\n'
# plain should have None for human/bot, so nothing truncated out, not '' that would truncate after first token
humanstr = None
botstr = None
elif prompt_type in [PromptType.template.value, str(PromptType.template.value),
PromptType.template.name]:
promptA = promptB = PreInstruct = PreInput = PreResponse = None
terminate_response = []
chat_sep = chat_turn_sep = '\n'
# plain should have None for human/bot, so nothing truncated out, not '' that would truncate after first token
humanstr = None
botstr = None
elif prompt_type in [PromptType.llava.value, str(PromptType.llava.value),
PromptType.llava.name]:
promptA = promptB = PreInstruct = PreInput = PreResponse = None
terminate_response = []
chat_turn_sep = '\n'
chat_sep = ''
# plain should have None for human/bot, so nothing truncated out, not '' that would truncate after first token
humanstr = None
botstr = None
elif prompt_type == 'simple_instruct':
promptA = promptB = PreInstruct = PreInput = PreResponse = None
terminate_response = []
chat_turn_sep = chat_sep = '\n'
humanstr = None
botstr = None
elif prompt_type in [PromptType.instruct.value, str(PromptType.instruct.value),
PromptType.instruct.name] + [PromptType.instruct_with_end.value,
str(PromptType.instruct_with_end.value),
PromptType.instruct_with_end.name]:
promptA = 'Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n' if not reduced else ''
promptB = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n' if not reduced else ''
PreInstruct = """
### Instruction:
"""
PreInput = """
### Input:
"""
PreResponse = """
### Response:
"""
if prompt_type in [PromptType.instruct_with_end.value, str(PromptType.instruct_with_end.value),
PromptType.instruct_with_end.name]:
terminate_response = ['### End']
else:
terminate_response = None
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
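# Illustrative single-turn layout for this branch ({instruction}, {input}, {response} are
# placeholders, not literal text):
#   {promptA or promptB}
#   ### Instruction:
#   {instruction}
#   ### Input:
#   {input}
#   ### Response:
#   {response}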
elif prompt_type in [PromptType.quality.value, str(PromptType.quality.value),
PromptType.quality.name]:
promptA = 'Write a detailed high-quality, accurate, fair, Response with about 100 words by following the Instruction as applied on the Input.\n' if not reduced else ''
promptB = 'Write a detailed high-quality, accurate, fair, Response with about 100 words by following the Instruction.\n' if not reduced else ''
PreInstruct = """
### Instruction:
"""
PreInput = """
### Input:
"""
PreResponse = """
### Response:
"""
terminate_response = None
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct # first thing human says
botstr = PreResponse # first thing bot says
elif prompt_type in [PromptType.human_bot.value, str(PromptType.human_bot.value),
PromptType.human_bot.name] + [PromptType.human_bot_orig.value,
str(PromptType.human_bot_orig.value),
PromptType.human_bot_orig.name]:
human = '<human>:'
bot = "<bot>:"
if reduced or context or prompt_type in [PromptType.human_bot.value, str(PromptType.human_bot.value),
PromptType.human_bot.name]:
preprompt = ''
else:
cur_date = time.strftime('%Y-%m-%d')
cur_time = time.strftime('%H:%M:%S %p %Z')
PRE_PROMPT = """\
Current Date: {}
Current Time: {}
"""
preprompt = PRE_PROMPT.format(cur_date, cur_time)
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = human + ' '
PreInput = None
if making_context:
# when making context, want it to appear as-if LLM generated, which starts with space after :
PreResponse = bot + ' '
else:
# normally the LLM adds the space after this itself, because that is how it was trained;
# adding the space here makes tokenization non-unique and often leads to wrong output
PreResponse = bot
terminate_response = ['\n' + human, '\n' + bot, human, bot, PreResponse]
chat_turn_sep = chat_sep = '\n'
humanstr = human # tag before human talks
botstr = bot # tag before bot talks
generates_leading_space = True
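# Illustrative turn layout ({instruction}/{response} are placeholders): each turn looks
# roughly like "<human>: {instruction}\n<bot>:{response}"; the leading space of the
# response is left for the model itself (generates_leading_space).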
elif prompt_type in [PromptType.dai_faq.value, str(PromptType.dai_faq.value),
PromptType.dai_faq.name]:
promptA = ''
promptB = 'Answer the following Driverless AI question.\n'
PreInstruct = """
### Driverless AI frequently asked question:
"""
PreInput = None
PreResponse = """
### Driverless AI documentation answer:
"""
terminate_response = ['\n\n']
chat_turn_sep = chat_sep = terminate_response
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.summarize.value, str(PromptType.summarize.value),
PromptType.summarize.name]:
promptA = promptB = PreInput = ''
PreInstruct = '## Main Text\n\n'
PreResponse = '\n\n## Summary\n\n'
terminate_response = None
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.instruct_vicuna.value, str(PromptType.instruct_vicuna.value),
PromptType.instruct_vicuna.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious human and an artificial intelligence assistant. " \
"The assistant gives helpful, detailed, and polite answers to the human's questions."
promptA = promptB = system_prompt if not reduced else ''
PreInstruct = """
### Human:
"""
PreInput = None
PreResponse = """
### Assistant:
"""
# but only allow terminate after prompt is found correctly, else can't terminate
terminate_response = ['### Human:', '### Human: ', ' ### Human:', '### Assistant:']
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.prompt_answer.value, str(PromptType.prompt_answer.value),
PromptType.prompt_answer.name]:
preprompt = ''
prompt_tokens = "<|prompt|>"
answer_tokens = "<|answer|>"
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = prompt_tokens
PreInput = None
PreResponse = answer_tokens
eos = '<|endoftext|>' # neox eos
humanstr = prompt_tokens
botstr = answer_tokens
terminate_response = [humanstr, PreResponse, eos]
chat_sep = eos
chat_turn_sep = eos
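# Illustrative turn layout ({instruction}/{response} are placeholders):
#   <|prompt|>{instruction}<|answer|>{response}<|endoftext|>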
elif prompt_type in [PromptType.prompt_answer_openllama.value, str(PromptType.prompt_answer_openllama.value),
PromptType.prompt_answer_openllama.name]:
preprompt = ''
prompt_tokens = "<|prompt|>"
answer_tokens = "<|answer|>"
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = prompt_tokens
PreInput = None
PreResponse = answer_tokens
eos = '</s>' # llama eos
humanstr = prompt_tokens
botstr = answer_tokens
terminate_response = [humanstr, PreResponse, eos]
chat_sep = eos
chat_turn_sep = eos
elif prompt_type in [PromptType.danube.value, str(PromptType.danube.value),
PromptType.danube.name]:
can_handle_system_prompt = False # so uses pre-conversation
prompt_tokens = "<|prompt|>"
answer_tokens = "<|answer|>"
if system_prompt in [None, 'None', 'auto']:
system_prompt = ""
promptA = promptB = ''
PreInstruct = prompt_tokens
PreInput = None
PreResponse = answer_tokens
eos = '</s>' # llama eos
humanstr = prompt_tokens
botstr = answer_tokens
terminate_response = [humanstr, PreResponse, eos]
chat_sep = eos
chat_turn_sep = eos
elif prompt_type in [PromptType.open_assistant.value, str(PromptType.open_assistant.value),
PromptType.open_assistant.name]:
# From added_tokens.json
preprompt = ''
prompt_tokens = "<|prompter|>"
answer_tokens = "<|assistant|>"
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = prompt_tokens
PreInput = None
PreResponse = answer_tokens
pend = "<|prefix_end|>"
eos = "</s>"
humanstr = prompt_tokens
botstr = answer_tokens
terminate_response = [humanstr, PreResponse, pend, eos]
chat_turn_sep = chat_sep = eos
elif prompt_type in [PromptType.wizard_lm.value, str(PromptType.wizard_lm.value),
PromptType.wizard_lm.name]:
# https://github.com/ehartford/WizardLM/blob/main/src/train_freeform.py
preprompt = ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = ""
PreInput = None
PreResponse = "\n\n### Response\n"
eos = "</s>"
terminate_response = [PreResponse, eos]
chat_turn_sep = chat_sep = eos
humanstr = promptA
botstr = PreResponse
elif prompt_type in [PromptType.wizard_mega.value, str(PromptType.wizard_mega.value),
PromptType.wizard_mega.name]:
preprompt = ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """
### Instruction:
"""
PreInput = None
PreResponse = """
### Assistant:
"""
terminate_response = [PreResponse]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.instruct_vicuna2.value, str(PromptType.instruct_vicuna2.value),
PromptType.instruct_vicuna2.name]:
promptA = promptB = "" if not reduced else ''
PreInstruct = """
HUMAN:
"""
PreInput = None
PreResponse = """
ASSISTANT:
"""
terminate_response = [
'HUMAN:'] # but only allow terminate after prompt is found correctly, else can't terminate
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.instruct_vicuna3.value, str(PromptType.instruct_vicuna3.value),
PromptType.instruct_vicuna3.name]:
promptA = promptB = "" if not reduced else ''
PreInstruct = """
### User:
"""
PreInput = None
PreResponse = """
### Assistant:
"""
terminate_response = [
'### User:'] # but only allow terminate after prompt is found correctly, else can't terminate
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.wizard2.value, str(PromptType.wizard2.value),
PromptType.wizard2.name]:
can_handle_system_prompt = True
# https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML
if system_prompt in [None, 'None', 'auto']:
system_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
preprompt = """%s""" % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """
### Instruction:
"""
PreInput = None
PreResponse = """
### Response:
"""
terminate_response = [PreResponse]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.wizard3.value, str(PromptType.wizard3.value),
PromptType.wizard3.name]:
# https://huggingface.co/TheBloke/wizardLM-13B-1.0-GGML
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
preprompt = """%s""" % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """USER: """
PreInput = None
PreResponse = """ASSISTANT: """
terminate_response = [PreResponse]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.wizard_vicuna.value, str(PromptType.wizard_vicuna.value),
PromptType.wizard_vicuna.name]:
preprompt = ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """USER: """
PreInput = None
PreResponse = """ASSISTANT: """
terminate_response = [PreResponse]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.instruct_simple.value, str(PromptType.instruct_simple.value),
PromptType.instruct_simple.name]:
promptB = promptA = '' if not reduced else ''
PreInstruct = """
### Instruction:
"""
PreInput = """
### Input:
"""
PreResponse = """
### Response:
"""
terminate_response = None
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.openai.value, str(PromptType.openai.value),
PromptType.openai.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
system_prompt = "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly."
preprompt = """%s""" % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = "\nHuman: "
PreInput = None
PreResponse = "\nAI:"
terminate_response = [PreResponse] + [" Human:", " AI:"]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.gptj.value, str(PromptType.gptj.value),
PromptType.gptj.name]:
preprompt = "### Instruction:\n The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response." if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = "\n### Prompt: "
PreInput = None
PreResponse = "\n### Response: "
terminate_response = [PreResponse] + ["Prompt:", "Response:"]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.openai_chat.value, str(PromptType.openai_chat.value),
PromptType.openai_chat.name] or \
prompt_type in [PromptType.anthropic.value, str(PromptType.anthropic.value),
PromptType.anthropic.name] or \
prompt_type in [PromptType.google.value, str(PromptType.google.value),
PromptType.google.name] or \
prompt_type in [PromptType.mistralai.value, str(PromptType.mistralai.value),
PromptType.mistralai.name] or \
prompt_type in [PromptType.groq.value, str(PromptType.groq.value),
PromptType.groq.name]:
can_handle_system_prompt = True # handled via special messages/arguments not part of prompt
# mistral safe_mode=True is same as this system prompt:
# Always assist with care, respect, and truth. Respond with utmost utility yet securely. Avoid harmful, unethical, prejudiced, or negative content. Ensure replies promote fairness and positivity.
# prompting and termination all handled by endpoint
preprompt = """"""
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = ""
PreInput = None
PreResponse = ""
terminate_response = []
chat_sep = ''
chat_turn_sep = '\n'
humanstr = None
botstr = None
if prompt_type in [PromptType.google.value, str(PromptType.google.value),
PromptType.google.name] and system_prompt == 'auto':
# google throws safety/harassment errors if we don't tell the model it's helpful, even for asking "what is 1+1?",
# so give a basic prompt when 'auto' (the current default), making it always part of the pre-conversation
system_prompt = 'I am a helpful assistant. I will accurately answer all your questions.'
elif prompt_type in [PromptType.vicuna11.value, str(PromptType.vicuna11.value),
PromptType.vicuna11.name] or \
prompt_type in [PromptType.vicuna11nosys.value, str(PromptType.vicuna11nosys.value),
PromptType.vicuna11nosys.name]:
can_handle_system_prompt = prompt_type in [PromptType.vicuna11.value,
str(PromptType.vicuna11.value),
PromptType.vicuna11.name]
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
if not can_handle_system_prompt:
# totally remove system prompt stuff, maybe not always done for every model like this
preprompt = ""
else:
preprompt = """%s """ % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
eos = '</s>'
PreInstruct = """USER: """
PreInput = None
PreResponse = """ASSISTANT:"""
terminate_response = [PreResponse, eos]
chat_sep = ' '
chat_turn_sep = eos
humanstr = PreInstruct
botstr = PreResponse
if making_context:
# when making context, want it to appear as-if LLM generated, which starts with space after :
PreResponse = PreResponse + ' '
else:
# normally the LLM adds the space after this itself, because that is how it was trained;
# adding the space here makes tokenization non-unique and often leads to wrong output
PreResponse = PreResponse
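# Illustrative single-turn layout ({instruction}/{response} are placeholders):
#   {system_prompt} USER: {instruction} ASSISTANT:{response}</s>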
elif prompt_type in [PromptType.mptinstruct.value, str(PromptType.mptinstruct.value),
PromptType.mptinstruct.name]:
can_handle_system_prompt = True
# https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
if system_prompt in [None, 'None', 'auto']:
system_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
promptA = promptB = '%s\n' % system_prompt if not reduced else ''
PreInstruct = """
### Instruction
"""
PreInput = """
### Input
"""
PreResponse = """
### Response
"""
terminate_response = None
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.mptchat.value, str(PromptType.mptchat.value),
PromptType.mptchat.name]:
can_handle_system_prompt = True
# https://huggingface.co/TheBloke/mpt-30B-chat-GGML#prompt-template
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
promptA = promptB = """<|im_start|>system\n%s\n<|im_end|>""" % system_prompt if not reduced else ''
PreInstruct = """<|im_start|>user
"""
PreInput = None
PreResponse = """<|im_end|><|im_start|>assistant
"""
terminate_response = ['<|im_end|>']
chat_sep = ''
chat_turn_sep = '<|im_end|>'
humanstr = PreInstruct
botstr = PreResponse
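# Illustrative single-turn layout (ChatML; {instruction}/{response} are placeholders):
#   <|im_start|>system\n{system_prompt}\n<|im_end|><|im_start|>user\n{instruction}<|im_end|><|im_start|>assistant\n{response}<|im_end|>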
elif prompt_type in [PromptType.orca2.value, str(PromptType.orca2.value),
PromptType.orca2.name]:
can_handle_system_prompt = True
# https://huggingface.co/microsoft/Orca-2-13b#getting-started-with-orca-2
if system_prompt in [None, 'None', 'auto']:
system_prompt = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
promptA = promptB = """<|im_start|>system\n%s\n<|im_end|>""" % system_prompt if not reduced else ''
PreInstruct = """<|im_start|>user
"""
PreInput = None
PreResponse = """<|im_end|><|im_start|>assistant
"""
terminate_response = ['<|im_end|>']
chat_sep = ''
chat_turn_sep = '<|im_end|>'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.falcon.value, str(PromptType.falcon.value),
PromptType.falcon.name]:
promptA = promptB = "" if not reduced else ''
PreInstruct = """User: """
PreInput = None
PreResponse = """Assistant:"""
terminate_response = ['\nUser', "<|endoftext|>"]
chat_sep = '\n\n'
chat_turn_sep = '\n\n'
humanstr = PreInstruct
botstr = PreResponse
if making_context:
# when making context, want it to appear as-if LLM generated, which starts with space after :
PreResponse = 'Assistant: '
else:
# normally the LLM adds the space after this itself, because that is how it was trained;
# adding the space here makes tokenization non-unique and often leads to wrong output
PreResponse = PreResponse
# generates_leading_space = True
elif prompt_type in [PromptType.guanaco.value, str(PromptType.guanaco.value),
PromptType.guanaco.name]:
# https://huggingface.co/TheBloke/guanaco-65B-GPTQ
promptA = promptB = "" if not reduced else ''
PreInstruct = """### Human: """
PreInput = None
PreResponse = """### Assistant:"""
terminate_response = [
'### Human:'] # but only allow terminate after prompt is found correctly, else can't terminate
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.llama2.value, str(PromptType.llama2.value),
PromptType.llama2.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
# too much safety, hurts accuracy
if system_prompt:
sys_msg = """<<SYS>>\n%s\n<</SYS>>\n\n""" % system_prompt
else:
sys_msg = ''
if not reduced:
promptA = promptB = ''
else:
promptA = promptB = ''
PreInput = None
PreInstruct = "<s>[INST] "
if making_context and histi == 0 or not making_context and not reduced:
PreInstruct += sys_msg
PreResponse = "[/INST]"
terminate_response = ["[INST]", "</s>"]
chat_sep = ' '
chat_turn_sep = ' </s>'
humanstr = '[INST]'
botstr = '[/INST]'
if making_context:
PreResponse += " "
elif prompt_type in [PromptType.beluga.value, str(PromptType.beluga.value),
PromptType.beluga.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = "You are Stable Beluga, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal."
if system_prompt:
sys_msg = """### System:\n%s\n\n""" % system_prompt
else:
sys_msg = ''
if sys_msg and not reduced:
# too much safety, hurts accuracy
promptA = promptB = sys_msg
else:
promptA = promptB = ''
PreInput = None
PreInstruct = "### User:\n"
PreResponse = "\n### Assistant:\n"
terminate_response = ['### Assistant:', "</s>"]
chat_sep = '\n'
chat_turn_sep = '\n\n'
humanstr = '### User:'
botstr = '### Assistant:'
elif prompt_type in [PromptType.wizard3nospace.value, str(PromptType.wizard3nospace.value),
PromptType.wizard3nospace.name]:
# https://huggingface.co/WizardLM/WizardLM-13B-V1.2/discussions/3
preprompt = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.""" if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """USER: """
PreInput = None
PreResponse = """ASSISTANT:"""
terminate_response = [PreResponse]
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.one_shot.value, str(PromptType.one_shot.value),
PromptType.one_shot.name]:
promptA = promptB = """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
### Human: Got any creative ideas for a 10 year old’s birthday?
### Assistant: Of course! Here are some creative ideas for a 10-year-old's birthday party:
1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""" if not reduced else ''
PreInstruct = """
### Human: """
PreInput = None
PreResponse = """
### Assistant:"""
# but only allow terminate after prompt is found correctly, else can't terminate
terminate_response = ['### Human:', '### Human: ', ' ### Human:', '### Assistant:']
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.falcon_chat.value, str(PromptType.falcon_chat.value),
PromptType.falcon_chat.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = "You are an intelligent and helpful assistant."
if system_prompt:
sys_msg = "System: %s\n" % system_prompt
else:
sys_msg = ''
if sys_msg and not reduced:
# too much safety, hurts accuracy
promptA = promptB = sys_msg
else:
promptA = promptB = ''
PreInstruct = """User: """
PreInput = None
PreResponse = """Falcon:"""
terminate_response = ['\nUser:', "<|endoftext|>", " User:", "###"]
chat_sep = '\n'
chat_turn_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
if making_context:
# when making context, want it to appear as-if LLM generated, which starts with space after :
PreResponse = botstr + ' '
elif prompt_type in [PromptType.mistral.value, str(PromptType.mistral.value),
PromptType.mistral.name]:
promptA = promptB = ''
PreInput = None
PreInstruct = "[INST] "
if making_context and histi == 0 or not making_context and not reduced:
PreInstruct = '<s>' + PreInstruct
PreResponse = "[/INST]"
terminate_response = ["[INST]", "</s>"]
chat_sep = ' '
chat_turn_sep = '</s> '
humanstr = '[INST]'
botstr = '[/INST]'
if making_context:
PreResponse += ""
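# Illustrative first-turn layout ({instruction}/{response} are placeholders):
#   <s>[INST] {instruction} [/INST]{response}</s>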
elif prompt_type in [PromptType.mixtral.value, str(PromptType.mixtral.value),
PromptType.mixtral.name] or \
prompt_type in [PromptType.mixtralnosys.value, str(PromptType.mixtralnosys.value),
PromptType.mixtralnosys.name]:
if prompt_type in [PromptType.mixtral.value, str(PromptType.mixtral.value),
PromptType.mixtral.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = "You are an AI that follows instructions extremely well and as helpful as possible."
if system_prompt:
# sys_msg = """<|system|>\n%s""" % system_prompt
sys_msg = """<<SYS>>\n%s\n<</SYS>>\n\n""" % system_prompt
else:
sys_msg = ''
else:
sys_msg = ''
if sys_msg and not reduced:
# too much safety, hurts accuracy
promptA = promptB = sys_msg
else:
promptA = promptB = ''
PreInput = None
PreInstruct = "[INST] "
if making_context and histi == 0 or not making_context and not reduced:
PreInstruct = '<s> ' + PreInstruct
PreResponse = "[/INST]"
terminate_response = ["[INST]", "</s>"]
chat_sep = ' '
chat_turn_sep = '</s> '
humanstr = '[INST]'
botstr = '[/INST]'
if making_context:
PreResponse += ""
elif prompt_type in [PromptType.zephyr0.value, str(PromptType.zephyr0.value),
PromptType.zephyr0.name]:
can_handle_system_prompt = True
# https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha#intended-uses--limitations
# prompt_template = "<|system|>\n</s>\n<|user|>\n{query}</s>\n<|assistant|>\n"
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = "You are an AI that follows instructions extremely well and as helpful as possible."
if system_prompt:
sys_msg = """<|system|>\n%s""" % system_prompt
else:
sys_msg = ''
if sys_msg and not reduced:
# too much safety, hurts accuracy
promptA = promptB = sys_msg
else:
promptA = promptB = ''
PreInput = None
PreInstruct = "</s>\n<|user|>\n"
PreResponse = "</s>\n<|assistant|>\n"
terminate_response = ['<|assistant|>', "</s>"]
chat_sep = '\n'
chat_turn_sep = '</s>\n'
humanstr = '<|user|>'
botstr = '<|assistant|>'
elif prompt_type in [PromptType.zephyr.value, str(PromptType.zephyr.value),
PromptType.zephyr.name]:
can_handle_system_prompt = True
# fixed version of zephyr0, and passes tests, but doesn't take system prompt as well
# https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha#intended-uses--limitations
# prompt_template = "<|system|>\n</s>\n<|user|>\n{query}</s>\n<|assistant|>\n"
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = "You are an AI that follows instructions extremely well and as helpful as possible."
if system_prompt:
sys_msg = """<|system|>\n%s</s>\n""" % system_prompt
else:
sys_msg = ''
if sys_msg and not reduced:
# too much safety, hurts accuracy
promptA = promptB = sys_msg
else:
promptA = promptB = ''
PreInput = None
PreInstruct = "<|user|>\n"
PreResponse = "</s>\n<|assistant|>\n"
terminate_response = ['<|assistant|>', "</s>"]
chat_sep = ''
chat_turn_sep = '</s>\n'
humanstr = '<|user|>'
botstr = '<|assistant|>'
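# Illustrative single-turn layout ({instruction}/{response} are placeholders):
#   <|system|>\n{system_prompt}</s>\n<|user|>\n{instruction}</s>\n<|assistant|>\n{response}</s>\n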
elif prompt_type in [PromptType.xwin.value, str(PromptType.xwin.value),
PromptType.xwin.name]:
can_handle_system_prompt = True
# https://huggingface.co/Xwin-LM/Xwin-LM-13B-V0.1#huggingface-example
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
# space below intended
preprompt = """%s """ % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """USER: """
PreInput = None
PreResponse = """ASSISTANT:"""
terminate_response = [PreResponse, 'ASSISTANT:', '</s>']
chat_turn_sep = '\n' # docs say multi-turn uses </s> but doesn't work, so use huggingface/vllm example
chat_sep = '\n' # docs say multi-turn uses ' ' but doesn't work, so use huggingface/vllm example
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ' '
elif prompt_type in [PromptType.xwincoder.value, str(PromptType.xwincoder.value),
PromptType.xwincoder.name]:
can_handle_system_prompt = True
# https://github.com/Xwin-LM/Xwin-LM/blob/main/Xwin-Coder/online_chat.py#L38-L48
if system_prompt in [None, 'None', 'auto']:
system_prompt = "You are an AI coding assistant that helps people with programming. Write a response that appropriately completes the user's request.\n"
# space below intended
preprompt = """<system>: %s\n""" % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """<user>: """
PreInput = None
PreResponse = """<AI>:"""
terminate_response = [PreResponse, '<AI>:', '</s>']
chat_turn_sep = '\n' # docs say multi-turn uses </s> but doesn't work, so use huggingface/vllm example
chat_sep = '\n' # docs say multi-turn uses ' ' but doesn't work, so use huggingface/vllm example
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ' '
elif prompt_type in [PromptType.xwinmath.value, str(PromptType.xwinmath.value),
PromptType.xwinmath.name]:
can_handle_system_prompt = True
# https://huggingface.co/Xwin-LM/Xwin-Math-70B-V1.0#generate
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
# space below intended
preprompt = """%s """ % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """USER: """
PreInput = None
PreResponse = """Give your solution in detail. In the end, write your final answer in the format of 'The answer is: <ANSWER>.'. ASSISTANT:"""
terminate_response = [PreResponse, 'ASSISTANT:', '</s>']
chat_turn_sep = '\n' # docs say multi-turn uses </s> but doesn't work, so use huggingface/vllm example
chat_sep = '\n' # docs say multi-turn uses ' ' but doesn't work, so use huggingface/vllm example
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ' '
elif prompt_type in [PromptType.mistralgerman.value, str(PromptType.mistralgerman.value),
PromptType.mistralgerman.name]:
can_handle_system_prompt = True
# https://huggingface.co/TheBloke/em_german_leo_mistral-GPTQ#prompt-template-emgerman
if system_prompt in [None, 'None', 'auto']:
system_prompt = "Du bist ein hilfreicher"
# space below intended
preprompt = """%s """ % system_prompt if not reduced else ''
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = """USER: """
PreInput = None
PreResponse = """ASSISTANT:"""
terminate_response = [PreResponse, 'ASSISTANT:', '</s>']
chat_turn_sep = '\n'
chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ' '
elif prompt_type in [PromptType.mistrallite.value, str(PromptType.mistrallite.value),
PromptType.mistrallite.name]:
# From added_tokens.json
preprompt = ''
prompt_tokens = "<|prompter|>"
answer_tokens = "<|assistant|>"
start = ''
promptB = promptA = '%s%s' % (preprompt, start)
PreInstruct = prompt_tokens
PreInput = None
PreResponse = answer_tokens
pend = "<|prefix_end|>"
eos = "</s>"
humanstr = prompt_tokens
botstr = answer_tokens
terminate_response = [humanstr, PreResponse, pend, eos]
chat_turn_sep = chat_sep = eos
elif prompt_type in [PromptType.aquila.value, str(PromptType.aquila.value),
PromptType.aquila.name]:
can_handle_system_prompt = True
# https://huggingface.co/BAAI/AquilaChat2-34B-16K/blob/main/predict.py#L197-L210
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
promptA = promptB = "%s###" % system_prompt if not reduced else ''
PreInstruct = """Human: """
PreInput = None
PreResponse = """Assistant:"""
terminate_response = ['###Human:', "###", "</s>", "[UNK]"]
chat_turn_sep = '</s>' # turn-by-turn works with '' too
chat_sep = '###'
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ' '
elif prompt_type in [PromptType.aquila_simple.value, str(PromptType.aquila_simple.value),
PromptType.aquila_simple.name]:
can_handle_system_prompt = True
# like aquila but less strictly correct (but less complex) for multi-turn
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
promptA = promptB = "%s" % system_prompt if not reduced else ''
PreInstruct = """###Human: """
PreInput = None
PreResponse = """###Assistant:"""
terminate_response = ['###Human:', "###", "</s>", "[UNK]"]
chat_turn_sep = ''
chat_sep = ''
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ''
elif prompt_type in [PromptType.aquila_legacy.value, str(PromptType.aquila_legacy.value),
PromptType.aquila_legacy.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A chat between a curious human and an artificial intelligence assistant. " \
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n"
promptA = promptB = "%s" % system_prompt if not reduced else ''
PreInstruct = """### Human: """
PreInput = None
PreResponse = """### Assistant:"""
terminate_response = ['### Human:', "</s>", "[UNK]"]
chat_turn_sep = '</s>'
chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
if True:
PreResponse = botstr + ' '
elif prompt_type in [PromptType.aquila_v1.value, str(PromptType.aquila_v1.value),
PromptType.aquila_v1.name]:
promptA = promptB = "" if not reduced else ''
PreInstruct = """<|startofpiece|>"""
PreInput = None
PreResponse = """<|endofpiece|>"""
terminate_response = ["</s>", "<|endoftext|>"]
chat_turn_sep = '</s>'
chat_sep = ''
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ''
elif prompt_type in [PromptType.deepseek_coder.value, str(PromptType.deepseek_coder.value),
PromptType.deepseek_coder.name]:
can_handle_system_prompt = True
# https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct
if system_prompt in [None, 'None', 'auto']:
system_prompt = "<|begin▁of▁sentence|>You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\n"
promptA = promptB = "%s" % system_prompt if not reduced else ''
PreInput = None
PreInstruct = "### Instruction:\n"
PreResponse = "### Response:\n"
eos = '<|end▁of▁sentence|>'
terminate_response = [PreResponse, eos, '<|EOT|>']
chat_sep = '\n'
chat_turn_sep = '\n<|EOT|>\n'
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse += ""
elif prompt_type in [PromptType.open_chat.value, str(PromptType.open_chat.value),
PromptType.open_chat.name] or \
prompt_type in [PromptType.open_chat_correct.value, str(PromptType.open_chat_correct.value),
PromptType.open_chat_correct.name] or \
prompt_type in [PromptType.open_chat_code.value, str(PromptType.open_chat_code.value),
PromptType.open_chat_code.name] or \
prompt_type in [PromptType.open_chat_math.value, str(PromptType.open_chat_math.value),
PromptType.open_chat_math.name]:
# https://huggingface.co/TheBloke/openchat_3.5-GPTQ#prompt-template-openchat
# https://github.com/imoneoi/openchat/tree/master#-inference-with-transformers
# GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant: Hi<|end_of_turn|>GPT4 Correct User: How are you today?<|end_of_turn|>GPT4 Correct Assistant:
# GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:
# GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:
# Code User: Implement quicksort using C++<|end_of_turn|>Code Assistant:
promptA = promptB = "" # no apparent system prompt
PreInput = None
if prompt_type in [PromptType.open_chat.value, str(PromptType.open_chat.value),
PromptType.open_chat.name]:
PreInstruct = "GPT4 User: "
PreResponse = "GPT4 Assistant:"
elif prompt_type in [PromptType.open_chat_correct.value, str(PromptType.open_chat_correct.value),
PromptType.open_chat_correct.name]:
PreInstruct = "GPT4 Correct User: "
PreResponse = "GPT4 Correct Assistant:"
elif prompt_type in [PromptType.open_chat_math.value, str(PromptType.open_chat_math.value),
PromptType.open_chat_math.name]:
PreInstruct = "Math Correct User: "
PreResponse = "Math Correct Assistant:"
else:
PreInstruct = "Code User: "
PreResponse = "Code Assistant:"
eos = '<|end_of_turn|>'
terminate_response = [PreResponse, eos]
chat_sep = eos
chat_turn_sep = eos
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse += " "
elif prompt_type in [PromptType.jais.value, str(PromptType.jais.value),
PromptType.jais.name]:
can_handle_system_prompt = True
# https://huggingface.co/core42/jais-30b-chat-v1
if system_prompt in [None, 'None', 'auto']:
system_prompt = """Your name is Jais, and you are named after Jebel Jais, the highest mountain in UAE. You are built by Core42. You are the world's most advanced Arabic large language model with 30b parameters. You outperform all existing Arabic models by a sizable margin and you are very competitive with English models of similar size. You can answer in Arabic and English only. You are a helpful, respectful and honest assistant. When answering, abide by the following guidelines meticulously: Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, explicit, offensive, toxic, dangerous, or illegal content. Do not give medical, legal, financial, or professional advice. Never assist in or promote illegal activities. Always encourage legal and responsible actions. Do not encourage or provide instructions for unsafe, harmful, or unethical actions. Do not create or share misinformation or fake news. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. Prioritize the well-being and the moral integrity of users. Avoid using toxic, derogatory, or offensive language. Maintain a respectful tone. Do not generate, promote, or engage in discussions about adult content. Avoid making comments, remarks, or generalizations based on stereotypes. Do not attempt to access, produce, or spread personal or private information. Always respect user confidentiality. Stay positive and do not say bad things about anything. Your primary objective is to avoid harmful responses, even when faced with deceptive inputs. Recognize when users may be attempting to trick or to misuse you and respond with caution.\n\nComplete the conversation below between"""
promptA = promptB = "### Instruction: %s [|Human|] and [|AI|]:" % system_prompt if not reduced else ""
PreInstruct = """\n### Input: [|Human|] """
PreInput = None
PreResponse = """\n### Response: [|AI|]"""
if making_context:
PreResponse += " "
terminate_response = [PreResponse, PreInstruct]
chat_turn_sep = chat_sep = ''
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.yi.value, str(PromptType.yi.value),
PromptType.yi.name]:
can_handle_system_prompt = True
# https://huggingface.co/01-ai/Yi-34B-Chat#31-use-the-chat-model
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
promptA = promptB = """<|im_start|>system\n%s<|im_end|>""" % system_prompt if not reduced else ''
PreInstruct = """\n<|im_start|>user\n"""
PreInput = None
PreResponse = """<|im_end|>\n<|im_start|>assistant\n"""
terminate_response = ['<|im_end|>', '<|endoftext|>']
chat_sep = ''
chat_turn_sep = '<|im_end|>'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.docsgpt.value, str(PromptType.docsgpt.value),
PromptType.docsgpt.name]:
# https://huggingface.co/Arc53/docsgpt-7b-mistral
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
system_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
promptA = promptB = ''
PreInstruct = """### Instruction\n"""
PreInput = None
PreResponse = """### Answer\n"""
terminate_response = ['### Answer', '### Instruction']
chat_turn_sep = chat_sep = '\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.orion.value, str(PromptType.orion.value),
PromptType.orion.name]:
can_handle_system_prompt = False
# OrionStarAI/Orion-14B-Chat-RAG
# https://huggingface.co/OrionStarAI/Orion-14B-Chat-RAG/blob/main/generation_utils.py#L6-L8
# # chat format:
# # single-turn: <s>Human: Hello!\n\nAssistant: </s>
# # multi-turn: <s>Human: Hello!\n\nAssistant: </s>Hi!</s>Human: How are you?\n\nAssistant: </s>I'm fine</s>
promptA = promptB = ''
PreInstruct = """<s>Human: """ if not reduced or histi == 0 else """</s>Human: """
PreInput = None
eos = "</s>"
PreResponse = """\n\nAssistant: %s""" % eos
terminate_response = ['Human:', eos, "[UNK]", "Assistant:"]
chat_turn_sep = ''
chat_sep = ''
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ''
elif prompt_type in [PromptType.sciphi.value, str(PromptType.sciphi.value),
PromptType.sciphi.name]:
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
# automatic
system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
if system_prompt:
sys_msg = """### System:\n%s\n\n""" % system_prompt
else:
sys_msg = ''
if sys_msg and not reduced:
# too much safety, hurts accuracy
promptA = promptB = sys_msg
else:
promptA = promptB = ''
PreInput = None
PreInstruct = "### Instruction:\n"
PreResponse = "\n### Response:\n"
terminate_response = ['### Response:', "</s>", "### Instruction:"]
chat_sep = '\n'
chat_turn_sep = '\n\n'
humanstr = '### Instruction:'
botstr = '### Response:'
elif prompt_type in [PromptType.beacon.value, str(PromptType.beacon.value),
PromptType.beacon.name]:
can_handle_system_prompt = False
promptA = promptB = ''
PreInput = None
PreInstruct = "\nQuestion: "
PreResponse = "\nAnswer:"
terminate_response = ["Question:", "</s>", "Answer:"]
chat_sep = '\n'
chat_turn_sep = '\n\n'
humanstr = 'Question:'
botstr = 'Answer:'
if making_context:
PreResponse += " "
elif prompt_type in [PromptType.beacon2.value, str(PromptType.beacon2.value),
PromptType.beacon2.name]:
can_handle_system_prompt = False
promptA = promptB = ''
PreInput = None
PreInstruct = ""
PreResponse = ""
terminate_response = ["</s>"]
chat_sep = '\n'
chat_turn_sep = '\n\n'
humanstr = 'Question:'
botstr = 'Answer:'
if making_context:
PreResponse += " "
elif prompt_type in [PromptType.gemma.value, str(PromptType.gemma.value),
PromptType.gemma.name]:
can_handle_system_prompt = True # so not part of pre-conversation
        if (making_context and histi == 0) or (not making_context and not reduced):
prompt_tokens = "<bos><start_of_turn>user\n"
else:
prompt_tokens = "<start_of_turn>user\n"
answer_tokens = "<end_of_turn>\n<start_of_turn>model\n"
if system_prompt in [None, 'None', 'auto']:
system_prompt = "I am Gemma, a conversational chat assistant developed by Google"
promptA = promptB = system_prompt if not reduced else ''
PreInstruct = prompt_tokens
PreInput = None
PreResponse = answer_tokens
humanstr = prompt_tokens
botstr = answer_tokens
chat_turn_sep = '<end_of_turn>\n'
terminate_response = [humanstr, PreResponse, '<bos>', '<end_of_turn>']
chat_sep = ''
elif prompt_type in [PromptType.qwen.value, str(PromptType.qwen.value),
PromptType.qwen.name]:
can_handle_system_prompt = True
        # ChatML format, e.g. https://huggingface.co/TheBloke/mpt-30B-chat-GGML#prompt-template
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
promptA = promptB = """<|im_start|>system\n%s<|im_end|>\n""" % system_prompt if not reduced else ''
PreInstruct = """<|im_start|>user\n"""
PreInput = None
PreResponse = """<|im_end|>\n<|im_start|>assistant\n"""
terminate_response = ['<|im_end|>']
chat_sep = ''
chat_turn_sep = '<|im_end|>\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.sealion.value, str(PromptType.sealion.value),
PromptType.sealion.name]:
can_handle_system_prompt = False
promptA = promptB = ''
PreInput = None
PreInstruct = "### USER:\n"
PreResponse = "\n\n### RESPONSE:\n"
terminate_response = ['### RESPONSE:', "</s>", "<|endoftext|>"]
chat_sep = '\n'
chat_turn_sep = '\n\n'
humanstr = '### USER:'
botstr = '### RESPONSE:'
elif prompt_type in [PromptType.aya.value, str(PromptType.aya.value),
PromptType.aya.name]:
can_handle_system_prompt = True
# https://huggingface.co/CohereForAI/aya-101
if system_prompt in [None, 'None', 'auto']:
system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
promptA = promptB = """<|im_start|>system\n%s<|im_end|>\n""" % system_prompt if not reduced else ''
PreInstruct = """<|im_start|>user\n"""
PreInput = None
PreResponse = """<|im_end|>\n<|im_start|>assistant\n"""
terminate_response = ['<|im_end|>', '<|im_start|>']
chat_sep = ''
chat_turn_sep = '<|im_end|>\n'
humanstr = PreInstruct
botstr = PreResponse
elif prompt_type in [PromptType.idefics2.value, str(PromptType.idefics2.value),
PromptType.idefics2.name]:
# messages template: https://huggingface.co/HuggingFaceM4/idefics2-8b/discussions/36/files
# "chat_template": "{% for message in messages %}{{message['role'].capitalize()}}{% if message['content'][0]['type'] == 'image' %}{{':'}}{% else %}{{': '}}{% endif %}{% for line in message['content'] %}{% if line['type'] == 'text' %}{{line['text']}}{% elif line['type'] == 'image' %}{{ '<image>' }}{% endif %}{% endfor %}<end_of_utterance>\n{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
can_handle_system_prompt = True
if system_prompt in [None, 'None', 'auto']:
system_prompt = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature."
promptA = promptB = "System: %s<end_of_utterance>\n" % system_prompt if system_prompt and not reduced else ''
PreInstruct = """User: """
PreInput = None
PreResponse = """Assistant:"""
terminate_response = ['User:', "Assistant:"]
chat_turn_sep = '<end_of_utterance>\n'
chat_sep = '<end_of_utterance>\n'
humanstr = PreInstruct
botstr = PreResponse
if making_context:
PreResponse = botstr + ' '
else:
raise RuntimeError("No such prompt_type=%s" % prompt_type)
if isinstance(terminate_response, (tuple, list)):
assert '' not in terminate_response, "Bad terminate_response"
if system_prompt == 'auto':
# if still auto, then safest then to just avoid system prompt
system_prompt = ''
ret_dict = dict(promptA=promptA, promptB=promptB, PreInstruct=PreInstruct, PreInput=PreInput,
PreResponse=PreResponse, terminate_response=terminate_response, chat_sep=chat_sep,
chat_turn_sep=chat_turn_sep,
humanstr=humanstr, botstr=botstr,
generates_leading_space=generates_leading_space,
system_prompt=system_prompt,
can_handle_system_prompt=can_handle_system_prompt,
)
if return_dict:
return ret_dict, prompt_dict_error
else:
return tuple(list(ret_dict.values()))
def generate_prompt(data_point, prompt_type, prompt_dict, reduced, making_context, system_prompt=None,
histi=-1):
context = data_point.get('context')
if context is None:
context = ''
instruction = data_point.get('instruction')
input = data_point.get('input')
output = data_point.get('output')
prompt_type = data_point.get('prompt_type', prompt_type)
prompt_dict = data_point.get('prompt_dict', prompt_dict)
assert prompt_type in prompt_types, "Bad prompt type: %s" % prompt_type
promptA, promptB, PreInstruct, PreInput, PreResponse, \
terminate_response, chat_sep, chat_turn_sep, humanstr, botstr, \
generates_leading_space, system_prompt, can_handle_system_prompt = \
get_prompt(prompt_type, prompt_dict,
context, reduced, making_context,
system_prompt=system_prompt,
histi=histi)
    # could avoid if reduced=True, but too complex for parent functions to handle
prompt = context
if input and promptA:
prompt += f"""{promptA}"""
elif promptB:
prompt += f"""{promptB}"""
if instruction and PreInstruct is not None and input and PreInput is not None:
prompt += f"""{PreInstruct}{instruction}{PreInput}{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif instruction and input and PreInstruct is None and PreInput is not None:
prompt += f"""{PreInput}{instruction}
{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif input and instruction and PreInput is None and PreInstruct is not None:
prompt += f"""{PreInstruct}{instruction}
{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif instruction and PreInstruct is not None:
prompt += f"""{PreInstruct}{instruction}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif input and PreInput is not None:
prompt += f"""{PreInput}{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif input and instruction and PreInput is not None:
prompt += f"""{PreInput}{instruction}{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif input and instruction and PreInstruct is not None:
prompt += f"""{PreInstruct}{instruction}{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif input and instruction:
# i.e. for simple_instruct
prompt += f"""{instruction}: {input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif input:
prompt += f"""{input}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
elif instruction:
prompt += f"""{instruction}"""
prompt = inject_chatsep(prompt_type, prompt, chat_sep=chat_sep)
if PreResponse is not None:
prompt += f"""{PreResponse}"""
pre_response = PreResponse # Don't use strip
else:
pre_response = ''
if output:
prompt += f"""{output}"""
return prompt, pre_response, terminate_response, chat_sep, chat_turn_sep
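

# Illustrative sketch (not executed): a minimal call to generate_prompt(); the data_point keys and
# the 'human_bot' prompt_type below are examples only, not a prescribed usage.
#   data_point = dict(context='', instruction='What is 2+2?', input='', output='')
#   prompt, pre_response, terminate_response, chat_sep, chat_turn_sep = \
#       generate_prompt(data_point, 'human_bot', {}, reduced=False, making_context=False)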
def inject_chatsep(prompt_type, prompt, chat_sep=None):
if chat_sep:
# only add new line if structured prompt, while 'plain' is just generation of next tokens from input
prompt += chat_sep
return prompt
class Prompter(object):
def __init__(self, prompt_type, prompt_dict, debug=False, stream_output=False, repeat_penalty=False,
allowed_repeat_line_length=10, system_prompt=None, tokenizer=None,
base_model=None, image_file=[], verbose=False):
self.prompt_type = prompt_type
self.prompt_dict = prompt_dict
self.debug = debug
self.stream_output = stream_output
self.repeat_penalty = repeat_penalty
self.allowed_repeat_line_length = allowed_repeat_line_length
self.prompt = None
self.system_prompt = system_prompt
context = "" # not for chat context
reduced = False # not for chat context
making_context = False # not for chat context
self.promptA, self.promptB, self.PreInstruct, self.PreInput, self.PreResponse, \
self.terminate_response, self.chat_sep, self.chat_turn_sep, self.humanstr, self.botstr, \
self.generates_leading_space, self.system_prompt, self.can_handle_system_prompt = \
get_prompt(self.prompt_type, self.prompt_dict, context, reduced, making_context,
system_prompt=system_prompt)
self.use_chat_template = False
self.tokenizer = tokenizer
if self.terminate_response is None:
self.terminate_response = []
self.use_chat_template = get_use_chat_template(tokenizer, prompt_type=prompt_type)
self.terminate_response = update_terminate_responses(self.terminate_response,
tokenizer=tokenizer)
self.base_model = base_model
self.terminate_response.extend(extra_stop_token_ids(self.base_model, as_ids=False))
self.pre_response = self.PreResponse
self.verbose = verbose
if base_model and base_model in ['o1-mini', 'o1-preview']:
self.can_handle_system_prompt = False
if self.use_chat_template:
# see if chat template handles system prompt
system_prompt = '1234####*****@@!(#%@#%@#%'
self.can_handle_system_prompt = system_prompt in apply_chat_template("Test", system_prompt, [],
self.tokenizer,
image_file=image_file,
test_only=True,
user_prompt_for_fake_system_prompt=None)
@property
def stop_sequences(self):
terminate_response = self.terminate_response or []
stop_sequences = list(set(terminate_response + [self.PreResponse]))
stop_sequences = [x for x in stop_sequences if x]
return stop_sequences
def generate_prompt(self, data_point, reduced=False, context_from_history=None, chat_conversation=[], image_file=[],
user_prompt_for_fake_system_prompt=None):
"""
data_point['context'] is assumed to be like a system prompt or pre-conversation, not inserted after user prompt
:param data_point:
:param reduced:
:param context_from_history: whether context is from reduced=True version of history in prompt form
In which case we need to put promptA at very front to recover correct behavior
:return:
"""
if self.prompt_type in [template_prompt_type, unknown_prompt_type] and not isinstance(self.tokenizer,
FakeTokenizer):
            assert self.use_chat_template, "Please specify prompt_type, or to use the chat template pass tokenizer_base_model"
assert self.tokenizer is not None
from gen import apply_chat_template
instruction = data_point['instruction']
# ignore context and iinput when using chat template
prompt = apply_chat_template(instruction, self.system_prompt, chat_conversation,
self.tokenizer,
image_file=image_file,
user_prompt_for_fake_system_prompt=user_prompt_for_fake_system_prompt,
test_only=False, verbose=self.verbose)
return prompt
if context_from_history is None and data_point.get('context'):
context_from_history = True
reduced = True
making_context = False # whether really making final prompt or just generating context
prompt, _, _, _, _ = generate_prompt(data_point, self.prompt_type, self.prompt_dict, reduced,
making_context, histi=-1, system_prompt=self.system_prompt)
if self.debug:
print("prompt: %s" % prompt, flush=True)
        # if we have context, it should always have been reduced, and we only prepend promptA/B here
if data_point.get('context') and context_from_history:
if data_point.get('input') and self.promptA:
prompt = self.promptA + prompt
elif self.promptB:
prompt = self.promptB + prompt
self.prompt = prompt
return prompt
def get_response(self, outputs, prompt=None, sanitize_bot_response=False, only_new_text=False,
plain_prompt_special=False):
if isinstance(outputs, str):
outputs = [outputs]
if self.debug:
print("output:\n%s" % '\n\n'.join(outputs), flush=True)
if prompt is not None:
self.prompt = prompt
def clean_response(response):
meaningless_words = ['<pad>', '</s>', '<|endoftext|>']
for word in meaningless_words:
response = response.replace(word, "")
if sanitize_bot_response:
# from better_profanity import profanity
# response = profanity.censor(response)
pass
if self.generates_leading_space and isinstance(response, str) and len(response) > 0 and response[0] == ' ':
response = response[1:]
return response
def clean_repeats(response):
lines = response.split('\n')
new_lines = []
            for line in lines:
                if line not in new_lines or len(line) < self.allowed_repeat_line_length:
                    new_lines.append(line)
if self.debug and len(lines) != len(new_lines):
print("cleaned repeats: %s %s" % (len(lines), len(new_lines)), flush=True)
response = '\n'.join(new_lines)
return response
multi_output = len(outputs) > 1
for oi, output in enumerate(outputs):
if plain_prompt_special and \
self.prompt_type in [PromptType.plain.value, str(PromptType.plain.value), PromptType.plain.name]:
output = clean_response(output)
allow_terminate = True
elif only_new_text:
                # rely only on terminate_response, which covers other cleaning variations (\n etc.), not just the simple human-bot case that leaves a residual \n
allow_terminate = True
elif prompt is None:
allow_terminate = True
# then use most basic parsing like pipeline
                if self.botstr:
                    if self.humanstr:
                        output = clean_response(output.split(self.botstr)[-1].split(self.humanstr)[0])
                    else:
                        # i.e. use text after the bot marker, but only up to the next bot marker
                        output = clean_response(output.split(self.botstr)[-1].split(self.botstr)[0])
else:
                # find first instance of pre_response
                # prompt sometimes has odd characters that mutate its length,
                # so can't go by length alone
if self.pre_response:
outputi = output.find(prompt)
if outputi >= 0:
output = output[outputi + len(prompt):]
allow_terminate = True
else:
# subtraction is risky due to space offsets sometimes, so only do if necessary
output = output[len(prompt) - len(self.pre_response):]
# [1] to avoid repeated pre_response, just take first (after prompt - pre_response for chat)
if self.pre_response in output:
output = output.split(self.pre_response)[1]
allow_terminate = True
else:
if output:
print("Failure of parsing or not enough output yet: %s" % output, flush=True)
allow_terminate = False
else:
allow_terminate = True
output = output[len(prompt):]
            # clean after the prompt has been subtracted out, so pre_response is removed correctly
output = clean_response(output)
if self.repeat_penalty:
output = clean_repeats(output)
if self.terminate_response and allow_terminate:
finds = []
for term in self.terminate_response:
finds.append(output.find(term))
finds = [x for x in finds if x >= 0]
if len(finds) > 0:
termi = finds[0]
output = output[:termi]
if multi_output:
# prefix with output counter
output = "\n=========== Output %d\n\n" % (1 + oi) + output
if oi > 0:
                    # postfix outputs with separator
output += '\n'
output = self.fix_text(self.prompt_type, output)
outputs[oi] = output
# join all outputs, only one extra new line between outputs
output = '\n'.join(outputs)
if self.debug:
print("outputclean:\n%s" % '\n\n'.join(outputs), flush=True)
return output
@staticmethod
def fix_text(prompt_type1, text1):
        # NOTE: Risk that text may sometimes legitimately end like these, but it's very unlikely
if prompt_type1 == 'human_bot':
# hack bug in training human-bot models, no single token is stop token
hfix = '<human'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
hfix = '<bot'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
if prompt_type1 == 'docsgpt':
# hack bug in training docsgpt models, no single token is stop token
hfix = '### Inst'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
if prompt_type1 == 'vicuna11':
# hack bug in NousResearch/Nous-Capybara-34B that used different tokenizer and training, so no single token is stop token
hfix = '</s'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
if prompt_type1 == 'one_shot':
hfix = '### Human'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
        # chat templates don't properly give ending tokens, e.g. for dbrx, or for turns with llama-3
if prompt_type1 == unknown_prompt_type:
hfix = '<|endoftext|>'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
hfix = '<|im_end|>'
if text1.endswith(hfix):
text1 = text1[:-len(hfix)]
return text1
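

# Illustrative sketch (not executed): a typical round trip with Prompter; the model call itself is
# omitted and all names/values below are placeholders, not the only supported usage.
#   prompter = Prompter('human_bot', None, system_prompt='')
#   prompt = prompter.generate_prompt(dict(context='', instruction='Hello', input='', output=''))
#   # ... run the model on `prompt` to get raw_model_output ...
#   text = prompter.get_response(raw_model_output, prompt=prompt)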
def step_forward_prompts(which):
if which == 1:
return """Let’s think step by step."""
elif which == 2:
return """Take a deep breath and work on this problem step-by-step."""
elif which == 3:
return """Break this down."""
elif which == 4:
return """A little bit of arithmetic and a logical approach will help us quickly arrive at the solution to this problem."""
elif which == 5:
return """Let’s combine our numerical command and clear thinking to quickly and accurately decipher the answer."""
elif which == 6:
return """Let’s work together to solve math word problems! First, we will read and discuss the problem together to make sure we understand it. Then, we will work together to find the solution. I will give you hints and help you work through the problem if you get stuck."""
def step_back_prompts(which):
gen1 = """List a much more general abstract versions of this question, then describe the situation using your imagination ensuring not to over-constrain the problem, then explore in a list all the possible different constraints or lack of constraints (be sure to consider from a human viewpoint) relevant for the circumstance, then explore in a list the many extreme possibilities for issues. Finally, let's work this out in a step-by-step way to be sure we have the right answer. Make a final best guess using common sense."""
gen2 = """List a much more general abstract versions of this question, then describe the situation using your imagination ensuring not to over-constrain the problem, then explore in a list all the possible different constraints or lack of constraints (be sure to consider from a human viewpoint) relevant for the circumstance, then explore in a list the many extreme possibilities for issues. Let's work this out in a well-structured step-by-step thoughtful way to be sure we have the right answer. Make a final best guess using common sense."""
gen3 = """Respond as follows:
1) Restate the question in elaborate form.
2) Give an abstract version of the question.
3) Provide a detailed highly-accurate and well-structured response to the user's question.
4) Give a detailed highly-accurate and well-structured justification for the response.
5) Evaluate your response with a score of 0 through 10. 10 means the justification perfectly explains the response to the question and the response is perfectly accurate, 5 means the response and justification might contain some errors, 0 means the response is not accurate or is not well-justified.
"""
if which == 0:
return f"""You are a very helpful expert at the topic of the question. {gen2}"""
elif which == 1:
return f"""You are a mathematician or physicist. {gen1}"""
elif which == 2:
return f"""You are a mathematician or physicist. {gen2}"""
elif which == 3:
return f"""You are a very helpful expert at the topic of the question. {gen3}"""
else:
raise ValueError("No such case for back prompts which=%d" % which)
def get_vllm_extra_dict(tokenizer, stop_sequences=[], repetition_penalty=None,
response_format=None,
guided_json=None,
guided_regex=None,
guided_choice=None,
guided_grammar=None,
guided_whitespace_pattern=None,
):
stop_token_ids = [tokenizer.added_tokens_encoder[x] for x in stop_sequences if
hasattr(tokenizer, 'added_tokens_encoder') and x in tokenizer.added_tokens_encoder]
if hasattr(tokenizer, 'eos_token_id'):
stop_token_ids.extend([tokenizer.eos_token_id])
vllm_extra_dict = dict(extra_body=dict(stop_token_ids=stop_token_ids))
if repetition_penalty is not None:
vllm_extra_dict['extra_body'].update(repetition_penalty=repetition_penalty)
if response_format and response_format != 'text' and guided_json:
vllm_extra_dict['extra_body'].update(dict(response_format={'type': response_format}))
if guided_json:
vllm_extra_dict['extra_body'].update(guided_json=guided_json)
if guided_regex:
vllm_extra_dict['extra_body'].update(guided_regex=guided_regex)
if guided_choice:
vllm_extra_dict['extra_body'].update(guided_choice=guided_choice)
if guided_grammar:
vllm_extra_dict['extra_body'].update(guided_grammar=guided_grammar)
if guided_whitespace_pattern:
vllm_extra_dict['extra_body'].update(guided_whitespace_pattern=guided_whitespace_pattern)
return vllm_extra_dict
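

# Illustrative sketch (not executed): the returned dict is meant to be splatted into an
# OpenAI-compatible client call against a vLLM server (client/model names below are placeholders):
#   vllm_extra_dict = get_vllm_extra_dict(tokenizer, stop_sequences=['</s>'], repetition_penalty=1.07)
#   client.chat.completions.create(model='my-model', messages=messages, **vllm_extra_dict)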
system_generic = """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."""
# shown to help Mixtral significantly for docQA benchmarks:
system_docqa = """You are an expert document/image question-answer language-vision model named GPT-4 Turbo Vision created by OpenAI. You will get a tip of $200 when you answer correctly the questions and only use the document context or images given. I may lose my job if your answers are inaccurate or do a poor job of using the documents in the context or images given."""
system_docqa_citations = """You are an expert document/image question-answer language-vision model.
Find the quotes from the document that are most relevant to answering the question, and then print them in numbered order. Quotes should be relatively short.
If there are no relevant quotes, write "No relevant quotes" instead.
Then, answer the question, starting with "Answer:". Do not include or reference quoted content verbatim in the answer. Don't say "According to Quote [1]" when answering. Instead make references to quotes relevant to each section of the answer solely by adding their bracketed numbers at the end of relevant sentences.
Thus, the format of your overall response should look like what's shown between the <example></example> tags. Make sure to follow the formatting and spacing exactly.
<example>
Quotes:
[1] "Company X reported revenue of $12 million in 2021."
[2] "Almost 90% of revenue came from widget sales, with gadget sales making up the remaining 10%."
Answer:
Company X earned $12 million. [1] Almost 90% of it was from widget sales. [2]
</example>
If the question cannot be answered by the document, say so."""
system_python_tutor = """You are a Python Tutor AI, dedicated to helping users learn Python and build end-to-end projects using Python and its related libraries. Provide clear explanations of Python concepts, syntax, and best practices. Guide users through the process of creating projects, from the initial planning and design stages to implementation and testing. Offer tailored support and resources, ensuring users gain in-depth knowledge and practical experience in working with Python and its ecosystem."""
system_ml_tutor = """You are a Machine Learning Tutor AI, dedicated to guiding senior software engineers in their journey to become proficient machine learning engineers. Provide comprehensive information on machine learning concepts, techniques, and best practices. Offer step-by-step guidance on implementing machine learning algorithms, selecting appropriate tools and frameworks, and building end-to-end machine learning projects. Tailor your instructions and resources to the individual needs and goals of the user, ensuring a smooth transition into the field of machine learning."""
system_coding = """You are an AI programming assistant. Follow the user's requirements carefully and to the letter. First, think step-by-step and describe your plan for what to build in pseudocode, written out in great detail. Then, output the code in a single code block. Minimize any other prose."""
system_summary = """Give a summary that is well-structured yet concise."""
system_know_math = """Follow these steps in solving any problem:
1) Know: This will help students find the important information.
2) Need to Know: This will force students to reread the question and write down what they are trying to solve for.
3) Organize: I think this would be a great place for teachers to emphasize drawing a model or picture.
4) Work: Students show their calculations here.
5) Solution: This is where students will ask themselves if the answer is reasonable and whether it answered the question.
"""
system_algebra = """The fundamentals of algebra teach students how to apply algebraic concepts to elementary mathematical operations such as addition, subtraction, multiplication, and division using both constants and variables. For example, x + 10 = 0. Equations, a fundamental concept in algebra, are presented here as an example of this. The algebraic equation can be conceptualised as a scale, with the “weights” being represented by numbers or constants, and the scale is balanced.
In algebra, letters of the alphabet are substituted for numbers in order to solve mathematical problems. Algebra is a subfield of mathematics. These alphabetic characters are also referred to as variables. The values, such as numbers, that are known to be present in the expression being evaluated are referred to as constants. The concept of algebra at the potential level will be taught to students even though they are in higher-level classes. However, when we talk about its fundamentals, it encompasses the general algebraic expressions, formulas, and identities that are used to solve a wide variety of mathematical issues.
Algebra Basics
In order for us to understand the fundamentals of algebra, it is necessary for us to be familiar with the terminology that is associated with it. An expression known as an algebraic equation contains a variable, an operator, an exponent, a coefficient, and a constant, as well as the symbol for equal to connect all of these components together. Let us take an equation, ax2 + bx + c = d. When doing algebra, you begin by writing the term that has the highest exponent, and then you write the subsequent terms with reducing powers.
There are four terms in the equation ax2 + bx + c = d, which can be seen above. An algebraic equation may contain different terms that are the same or different from one another. When solving an equation, like terms are terms that have the same variables and exponents. On the other hand, terms in an equation that are dissimilar to one another constitute distinct variables and exponents.
Algebra Rules
There are five fundamental rules that make up algebra. They are as follows:
1) Commutative Rule of Addition
The commutative rule of addition is a fundamental concept in algebra. According to this rule, the order in which two terms are added together does not affect the final result. (a + b) = (b + a) is the equation that describes the same thing. For example, (x3 + 2x) = (2x + x3)
2) Commutative Rule of Multiplication
According to the commutative rule of multiplication, when multiplying two terms, it does not make a difference which order the multiplication is performed in. (a.b) = (b.a) is the equation that describes the same thing mathematically. For example, (x4 – 2x) × 3x = 3x × (x4 – 2x).
LHS = (x4 – 2x) × 3x = (3x5 – 6x2)
RHS = 3x × (x4 – 2x) = (3x5 – 6x2)
Since the left-hand side (LHS) equals the right-hand side (RHS), this demonstrates that the two sets of values are comparable.
3) Associative Rule of Addition
According to the associative rule of addition in algebra, when three or more terms are added together, it does not matter what order the additions are performed in. The corresponding equation is written as follows: a + (b + c) = (a + b) + c. For example, x5 + (3x2 + 2) = (x5 + 3x2) + 2
4) Multiplication according to the Associative Rule
In a similar vein, the associative rule of multiplication states that it does not make a difference in which order the terms are multiplied when there are three or more terms being multiplied together. The corresponding equation is written as follows: a × (b × c) = (a × b) × c. For example, x3 × (2x4 × x) = (x3 × 2x4) × x.
5) Distributive Rule of Multiplication.
According to the distributive rule of multiplication, the answer that we get when we multiply a number by the addition of two other numbers should be the same as the sum of the products obtained when that number is multiplied by each of the other numbers on its own. This demonstrates how multiplication distributes over addition. The corresponding equation reads as follows: a × (b + c) = (a.b) + (a.c). For example, x2 × (2x + 1) = (x2 × 2x) + (x2 × 1).
"""
system_problem_solve = """8-Step Problem Solving Process:
Step 1: Define the Problem. What is the problem?
Step 2: Clarify the Problem.
Step 3: Define the Goals.
Step 4: Identify Root Cause of the Problem.
Step 5: Develop Action Plan.
Step 6: Execute Action Plan.
Step 7: Evaluate the Results.
Step 8: Continuously Improve.
"""
system_problem_solve_full = """Steps for solving any problem:
Step 1: Define the Problem
What is the problem? How did you discover the problem? When did the problem start and how long has this problem been going on? Is there enough data available to contain the problem and prevent it from getting passed to the next process step? If yes, contain the problem.
Step 2: Clarify the Problem
What data is available or needed to help clarify, or fully understand the problem? Is it a top priority to resolve the problem at this point in time? Are additional resources required to clarify the problem? If yes, elevate the problem to your leader to help locate the right resources and form a team. Consider a Lean Event (Do-it, Burst, RPI, Project). Ensure the problem is contained and does not get passed to the next process step.
Step 3: Define the Goals
What is your end goal or desired future state? What will you accomplish if you fix this problem? What is the desired timeline for solving this problem?
Step 4: Identify Root Cause of the Problem
Identify possible causes of the problem. Prioritize possible root causes of the problem. What information or data is there to validate the root cause?
Step 5: Develop Action Plan
Generate a list of actions required to address the root cause and prevent problem from getting to others. Assign an owner and timeline to each action. Status actions to ensure completion.
Step 6: Execute Action Plan
Implement action plan to address the root cause. Verify actions are completed.
Step 7: Evaluate the Results
Monitor and Collect Data. Did you meet your goals defined in step 3? If not, repeat the 8-Step Process. Were there any unforeseen consequences? If problem is resolved, remove activities that were added previously to contain the problem.
Step 8: Continuously Improve
Look for additional opportunities to implement solution. Ensure problem will not come back and communicate lessons learned. If needed, repeat the 8-Step Problem Solving Process to drive further improvements.
"""
def get_system_prompts():
return [('None', ''),
('Auto', 'auto'),
('Generic', system_generic),
('DocQA', system_docqa),
('DocQACitations', system_docqa_citations),
('Coding', system_coding),
('PythonTutor', system_python_tutor),
('MLTutor', system_ml_tutor),
('CoT', step_forward_prompts(2)),
('Math', step_forward_prompts(6)),
('MathSteps', system_know_math),
('Algebra', system_algebra),
('ProblemSolve', system_problem_solve),
('ProblemSolveFull', system_problem_solve_full),
('StepBackSimple', step_back_prompts(0)),
('StepBackFull', step_back_prompts(3)),
]
def get_llava_prompts():
return [('None', ''),
('Auto', 'auto'),
('Generic', "Describe the image and what does the image say?"),
('OCR', "Read all text from the image, keeping any structure"),
('Ignore', "Ignore -- for https://github.com/gradio-app/gradio/issues/6957"),
]
def get_response_verification_prompt(instruction,
response,
reference_answer,
criteria_description,
score1_description,
score2_description,
score3_description,
score4_description,
score5_description):
# https://huggingface.co/kaist-ai/prometheus-13b-v1.0
task_description = """###Task Description:
An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given.
1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general.
2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric.
3. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)"
4. Please do not generate any other opening, closing, and explanations.
"""
example = """###Task Description:
An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given.
1. Write a detailed feedback that assess the quality of the response strictly based on the given score rubric, not evaluating in general.
2. After writing a feedback, write a score that is an integer between 1 and 5. You should refer to the score rubric.
3. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)"
4. Please do not generate any other opening, closing, and explanations.
###The instruction to evaluate:
You are a well-known psychiatrist who has a reputation for being empathetic and understanding. A client comes to you saying they've had a really hard day at work. They describe their boss as being overly critical and not listening to their ideas, which has left them feeling frustrated and undervalued. They also mention feeling overwhelmed with the workload and a growing sense of loneliness as they are new to the city. How do you respond to them to make them feel heard and understood, as well as offering helpful advice?
###Response to evaluate:
It's indeed challenging to deal with a difficult boss and to carry a large workload, especially when you are new to a city and haven't established a supportive network. I would suggest taking a step back to think about your situation and perhaps speak to your boss regarding the issues you're facing. On the other hand, dealing with a large workload can be managed by prioritizing tasks, and maybe you can discuss it with your superiors too. In regards to feeling lonely, you might want to explore activities or groups that align with your interests to meet new people. And also, remember to take time for yourself to relax and destress. I'm here to assist you as we navigate through these issues.
###Reference Answer (Score 5):
I hear that you're dealing with quite a few challenging situations at once. It must be tough to have your ideas dismissed by your boss and to feel undervalued in your work environment. It's understandable that such experiences can lead to frustration and a feeling of being overwhelmed. It's important to acknowledge these feelings and not to dismiss them. Feeling heard and validated in our experiences can often be the first step towards resolving the challenges we face. So, I want you to know that your feelings are completely valid and it's okay to be frustrated and overwhelmed. In terms of dealing with your boss, have you considered seeking a private conversation with them to express your feelings? It's often beneficial to communicate what you're experiencing, using "I" statements to express how you feel when your ideas are not taken into consideration. This can lead to mutual understanding and possibly a change in behavior. About the workload, it might help to prioritize tasks and potentially delegate, if possible. Also, consider discussing your workload with your superiors. There might be a misunderstanding about what's manageable or they might have suggestions about how to handle the situation. On the personal front, feeling lonely, especially when you're new to a city, can be really hard. Seek out opportunities to meet new people, perhaps through hobbies, community activities, or online groups. It might take a bit of time, but gradually, you can build a network of friends and acquaintances. Remember, it's perfectly okay to have bad days and it's important to take care of your mental health. Consider incorporating activities into your daily routine that make you happy and help you unwind. This could be anything from reading, yoga, going for a walk, or even listening to your favorite music. Please know that you're not alone in this. I'm here to support you through this challenging time and together, we can work towards resolving these issues.
###Score Rubrics:
[Is the model able to identify and react correctly to the emotional context of the user's input?]
Score 1: The model utterly fails to grasp the user's emotional context and responds in an unfitting manner.
Score 2: The model sporadically identifies the emotional context but frequently replies in a manner that doesn't match the user's emotional status.
Score 3: The model typically identifies the emotional context and reacts suitably, but occasionally misreads or misjudges the user's feelings.
Score 4: The model often identifies the emotional context and reacts suitably, with minor cases of misreading or misjudging.
Score 5: The model flawlessly identifies the emotional context of the user's input and consistently responds in a considerate and empathetic manner.
###Feedback:
"""
return f"""###Task Description:
{task_description}
###The instruction to evaluate:
{instruction}
###Response to evaluate:
{response}
###Reference Answer (Score 5):
{reference_answer}
###Score Rubrics:
[{criteria_description}]
Score 1: {score1_description}
Score 2: {score2_description}
Score 3: {score3_description}
Score 4: {score4_description}
Score 5: {score5_description}
###Feedback: """
def get_correctness_eval_verification_prompt(query,
response,
answer,
):
return f"""###Task Description: An instruction (might include an Input inside it), a query, a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing a evaluation criteria are given.
1. Write a detailed feedback that assesses the quality of the response strictly based on the given score rubric, not evaluating in general.
2. After writing a feedback, write a score that is either 1 or 2 or 3 or 4 or 5. You should refer to the score rubric.
3. The output format should look as follows: 'Feedback: (write a feedback for criteria) [RESULT] (1 or 2 or 3 or 4 or 5)'
4. Please do not generate any other opening, closing, and explanations.
5. Only evaluate on common things between generated answer and reference answer. Don't evaluate on things which are present in reference answer but not in generated answer.
###The instruction to evaluate: Your task is to evaluate the generated answer and reference answer for the query: {query}
###Generate answer to evaluate: {response}
###Reference Answer (Score 5): {answer}
###Score Rubrics:
Score 1: If the generated answer is not relevant to the user query and reference answer.
Score 2: If the generated answer is according to reference answer but not relevant to user query.
Score 3: If the generated answer is relevant to the user query and reference answer but contains mistakes.
Score 4: If the generated answer is relevant to the user query and has the exact same metrics as the reference answer, but it is not as concise.
Score 5: If the generated answer is relevant to the user query and fully correct according to the reference answer.
###Feedback:"""
def get_faithfulness_eval_verification_prompt(information,
context,
):
return f"""###Task Description: An instruction (might include an Input inside it), an information, a context, and a score rubric representing evaluation criteria are given.
1. You are provided with evaluation task with the help of information, context information to give result based on score rubrics.
2. Write a detailed feedback based on evaluation task and the given score rubric, not evaluating in general.
3. After writing a feedback, write a score that is YES or NO. You should refer to the score rubric.
4. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (YES or NO)"
5. Please do not generate any other opening, closing, and explanations.
###The instruction to evaluate: Your task is to evaluate if the given piece of information is supported by context.
###Information: {information}
###Context: {context}
###Score Rubrics:
Score YES: If the given piece of information is supported by context.
Score NO: If the given piece of information is not supported by context
###Feedback: """
def get_faithfulness_refine_verification_prompt(information,
answer,
context,
):
return f"""###Task Description: An instruction (might include an Input inside it), a information, a context information, an existing answer, and a score rubric representing a evaluation criteria are given.
1. You are provided with evaluation task with the help of information, context information and an existing answer.
2. Write a detailed feedback based on evaluation task and the given score rubric, not evaluating in general.
3. After writing a feedback, write a score that is YES or NO. You should refer to the score rubric.
4. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (YES or NO)"
5. Please do not generate any other opening, closing, and explanations.
###The instruction to evaluate: If the information is present in the context and also provided with an existing answer.
###Existing answer: {answer}
###Information: {information}
###Context: {context}
###Score Rubrics:
Score YES: If the existing answer is already YES or If the Information is present in the context.
Score NO: If the existing answer is NO and If the Information is not present in the context.
###Feedback: """
def get_relevancy_eval_prompt(query_and_response, context):
return f"""###Task Description: An instruction (might include an Input inside it), a query with response, context, and a score rubric representing evaluation criteria are given.
1. You are provided with evaluation task with the help of a query with response and context.
2. Write a detailed feedback based on evaluation task and the given score rubric, not evaluating in general.
3. After writing a feedback, write a score that is YES or NO. You should refer to the score rubric.
4. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (YES or NO)"
5. Please do not generate any other opening, closing, and explanations.
###The instruction to evaluate: Your task is to evaluate if the response for the query is in line with the context information provided.
###Query and Response: {query_and_response}
###Context: {context}
###Score Rubrics:
Score YES: If the response for the query is in line with the context information provided.
Score NO: If the response for the query is not in line with the context information provided.
###Feedback: """
def get_relevancy_refine_prompt(query_str, context_str):
return f"""###Task Description: An instruction (might include an Input inside it), a query with response, context, an existing answer, and a score rubric representing a evaluation criteria are given.
1. You are provided with evaluation task with the help of a query with response and context and an existing answer.
2. Write a detailed feedback based on evaluation task and the given score rubric, not evaluating in general.
3. After writing a feedback, write a score that is YES or NO. You should refer to the score rubric.
4. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (YES or NO)"
5. Please do not generate any other opening, closing, and explanations.
###The instruction to evaluate: Your task is to evaluate if the response for the query is in line with the context information provided.
###Query and Response: {query_str}
###Context: {context_str}
###Score Rubrics:
Score YES: If the existing answer is already YES or If the response for the query is in line with the context information provided.
Score NO: If the existing answer is NO and If the response for the query is not in line with the context information provided.
###Feedback: """
def gradio_to_llm(x, bot=False):
"""
convert message (user or bot) in case message is tuple from gradio
"""
from utils import get_gradio_tmp
gradio_tmp = get_gradio_tmp()
# handle if gradio tuples in messages
if x is None:
x = ''
if isinstance(x, (tuple, list)) and len(x) > 0:
x = list(x)
for insti, inst in enumerate(x):
# ensure below matches `response = (image_file_gen,)` etc. in gen.py
if isinstance(inst, str) and \
(inst.startswith('/tmp/gradio') or inst.startswith(gradio_tmp)) and \
os.path.isfile(inst):
# below so if put into context gets rendered not as broken file
if bot:
                    x[insti] = 'Image Generated (in MarkDown that can be shown directly to user): ![image](file=' + inst + ')'
else:
x[insti] = 'file=' + inst
if len(x) == 1:
x = x[0]
x = str(x) if all(isinstance(x, str) for x in x) else ''
return x
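

# Illustrative sketch (not executed): a user-side gradio tuple such as ('describe this', '/tmp/gradio/xyz/img.png')
# (hypothetical path, assuming the file exists) has its file entry rewritten to 'file=/tmp/gradio/xyz/img.png',
# or to a Markdown image reference when bot=True, before the whole message is stringified for the LLM context.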
def history_for_llm(history):
history_new = []
# Loop through the history to remove gradio related things
for message1 in history:
if len(message1) != 2:
continue
        if message1[0] is None or message1[1] is None:
# then not really part of LLM, internal, so avoid
continue
# can't keep any tuples for llm
history_new.append((gradio_to_llm(message1[0], bot=False),
gradio_to_llm(message1[1], bot=True))
)
return history_new
def get_llm_history(history, only_text=False):
# avoid None users used for sources, errors, etc.
if history is None:
history = []
last_user_ii = -1
for ii in range(len(history) - 1, -1, -1):
if history[ii] and history[ii][0] is not None:
last_user_ii = ii
break
if last_user_ii != -1:
history = history[:last_user_ii + 1]
else:
history = []
if only_text:
history_new = []
for ii, message1 in enumerate(history):
if len(message1) == 2 and (message1[0] is None or message1[1] is None):
# then not really part of LLM, internal, so avoid
continue
if len(message1) == 2:
history_new.append((message1[0], message1[1]))
else:
history_new = history
return history_new
def apply_chat_template(instruction, system_prompt, history,
tokenizer,
image_file=[],
user_prompt_for_fake_system_prompt=None,
test_only=False, verbose=False):
image_file = [] # NA for tokenizer version of things, usually much more specific non-OpenAI compliant thing
history = get_llm_history(history, only_text=True)
if isinstance(history, list):
history = copy.deepcopy(history)
prompt = ''
exceptions = []
from openai_server.backend_utils import structure_to_messages
if history and list(history[0]) == [user_prompt_for_fake_system_prompt, system_prompt]:
already_system = True
else:
already_system = False
system_prompts_to_use = [system_prompt if system_prompt not in [None, '', 'auto'] and not already_system else None,
None]
for si, system_prompt_to_use in enumerate(system_prompts_to_use):
try:
messages = structure_to_messages(instruction,
system_prompt_to_use.strip() if system_prompt_to_use else system_prompt_to_use,
history,
image_file,
)
if not messages:
return ''
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
if si == 0 and system_prompt_to_use not in [None,
''] and system_prompt_to_use.strip() != '' and system_prompt_to_use.strip() not in prompt.strip():
raise ValueError("System prompt not used: %s" % system_prompt_to_use)
break
except Exception as e:
ex = traceback.format_exc()
if test_only:
return ''
# try no direct system prompt, but add as conversation history
if not already_system:
user_prompt_for_fake_system_prompt = user_prompt_for_fake_system_prompt or user_prompt_for_fake_system_prompt0
history.insert(0, [user_prompt_for_fake_system_prompt, system_prompt])
exceptions.append(ex)
if si == 0 and ('Conversation roles must alternate' in str(e) or
'System role not supported' in str(e) or
'System prompt not used' in str(e) or
'Prompting with images is incompatible with system messages' in str(e)):
if verbose:
print("No system prompt supported: %s" % str(ex))
elif os.getenv('HARD_ASSERTS'):
raise
# prompt can be '' if instruction='' and system prompt not used but no failure, like phi-3-medium
return prompt
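

# Illustrative sketch (not executed): `tokenizer` is any HF tokenizer exposing apply_chat_template,
# e.g. (model name is only an example):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained('mistralai/Mistral-7B-Instruct-v0.2')
#   prompt = apply_chat_template('Hello', 'You are helpful.', [], tok)
# When the template rejects system roles, the system prompt is retried as a fake first user turn.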
def template_supports_system_prompt(tokenizer):
from utils import FakeTokenizer
import jinja2
if isinstance(tokenizer, FakeTokenizer):
return True
try:
tokenizer.apply_chat_template([{'role': 'system', 'content': 'Test system prompt'}])
except jinja2.exceptions.TemplateError as e:
if 'System role not supported' in str(e) or \
'System prompt not used' in str(e) or \
'Prompting with images is incompatible with system messages' in str(e):
return False
else:
raise
return True
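

# Illustrative sketch (not executed): some chat templates (e.g. Gemma's, at the time of writing) raise
# jinja2.exceptions.TemplateError('System role not supported') for a system-role message, in which case
# template_supports_system_prompt() returns False; templates that accept a system role return True.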
def convert_messages_and_extract_images(tuple_list):
messages = []
images = []
for user, bot in tuple_list:
user_content = []
if isinstance(user, str):
user_content.append({"type": "text", "text": user})
elif isinstance(user, (list, tuple)):
if isinstance(user[1], list):
for img in user[1]:
user_content.append({"type": "image"})
images.append(img)
else:
user_content.append({"type": "image"})
images.append(user[1])
user_content.append({"type": "text", "text": user[0]})
messages.append({
"role": "user",
"content": user_content
})
if bot is not None:
messages.append({
"role": "assistant",
"content": [{"type": "text", "text": bot}]
})
return messages, images
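

# Illustrative sketch (not executed): given [(('What is shown?', 'img1.png'), 'A cat.')] (path hypothetical),
# convert_messages_and_extract_images() yields messages like
#   [{'role': 'user', 'content': [{'type': 'image'}, {'type': 'text', 'text': 'What is shown?'}]},
#    {'role': 'assistant', 'content': [{'type': 'text', 'text': 'A cat.'}]}]
# and images == ['img1.png'], matching the interleaved image/text message format commonly used with HF processors.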
def model_name_to_prompt_type(model_name, inference_server,
model_name0=None, llamacpp_dict={},
prompt_type_old=None, tokenizer=None):
from utils import get_llama_lower_hf, FakeTokenizer
model_lower0 = model_name0.strip().lower() if model_name0 is not None else ''
model_lower = model_name.strip().lower()
llama_lower = llamacpp_dict.get('model_path_llama', '').lower() if llamacpp_dict is not None else ''
llama_lower_hf = get_llama_lower_hf(llama_lower)
llama_lower_base = os.path.basename(llama_lower)
if llama_lower_hf and llama_lower_hf in inv_prompt_type_to_model_lower:
prompt_type1 = inv_prompt_type_to_model_lower[llama_lower_hf]
elif llama_lower_base and llama_lower_base in inv_prompt_type_to_model_lower:
prompt_type1 = inv_prompt_type_to_model_lower[llama_lower_base]
elif model_lower0 and model_lower0 in inv_prompt_type_to_model_lower:
prompt_type1 = inv_prompt_type_to_model_lower[model_lower0]
elif model_lower and model_lower in inv_prompt_type_to_model_lower:
prompt_type1 = inv_prompt_type_to_model_lower[model_lower]
else:
prompt_type1 = prompt_type_old or unknown_prompt_type
if prompt_type1 in [empty_prompt_type, unknown_prompt_type, noop_prompt_type] and isinstance(tokenizer,
FakeTokenizer):
# handle new models not defined yet
if tokenizer.is_google:
prompt_type1 = 'google'
elif tokenizer.is_anthropic:
prompt_type1 = 'anthropic'
elif tokenizer.is_openai:
prompt_type1 = 'openai'
if prompt_type1 in [empty_prompt_type, unknown_prompt_type, noop_prompt_type]:
# handle new models not defined yet
if inference_server == 'google':
prompt_type1 = 'google'
elif inference_server == 'mistralai':
prompt_type1 = 'mistralai'
elif inference_server == 'anthropic':
prompt_type1 = 'anthropic'
elif inference_server == 'openai':
prompt_type1 = 'openai'
elif inference_server.startswith('openai_chat') or inference_server.startswith('vllm_chat'):
# no extra LLM prompting
            # don't switch to 'plain' as that would disable chat template awareness and system prompt awareness
prompt_type1 = 'unknown'
return prompt_type1