Update configs.py
configs.py CHANGED (+35 -0)

@@ -0,0 +1,35 @@
+import os
+
+## info
+dataset_info = [
+    {'name': 'Commonsense', 'hf_repo': 'tau/commonsense_qa', 'text_col': 'question'},
+    {'name': 'Factual Recall', 'hf_repo': 'azhx/counterfact-filtered-gptj6b', 'text_col': 'subject+predicate',
+     'filter': lambda x: x['label'] == 1},
+    # {'name': 'Physical Understanding', 'hf_repo': 'piqa', 'text_col': 'goal'},
+    {'name': 'Social Reasoning', 'hf_repo': 'ProlificAI/social-reasoning-rlhf', 'text_col': 'question'}
+]
+
+
+model_info = {
+    'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', device_map='cpu', token=os.environ['hf_token'],
+                      original_prompt_template='<s>{prompt}',
+                      interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
+                      ),  # , load_in_8bit=True
+
+    # 'Gemma-2B': dict(model_path='google/gemma-2b', device_map='cpu', token=os.environ['hf_token'],
+    #                  original_prompt_template='<bos>{prompt}',
+    #                  interpretation_prompt_template='<bos>User: [X]\n\nAnswer: {prompt}',
+    #                  ),
+
+    'Mistral-7B Instruct': dict(model_path='mistralai/Mistral-7B-Instruct-v0.2', device_map='cpu',
+                                original_prompt_template='<s>{prompt}',
+                                interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
+                                ),
+
+    # 'TheBloke/Mistral-7B-Instruct-v0.2-GGUF': dict(model_file='mistral-7b-instruct-v0.2.Q5_K_S.gguf',
+    #                                                tokenizer='mistralai/Mistral-7B-Instruct-v0.2',
+    #                                                model_type='llama', hf=True, ctransformers=True,
+    #                                                original_prompt_template='<s>[INST] {prompt} [/INST]',
+    #                                                interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
+    #                                                )
+}
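For context, here is a minimal sketch of how these two configs might be consumed. The helper names (sample_texts, build_prompts) and the reading of the '[X]' slot as the point where an inspected hidden representation is patched in are assumptions for illustration, not code or claims from this repo; only dataset_info, model_info, and the template strings come from the diff above.

import os
from datasets import load_dataset

# configs.py reads os.environ['hf_token'] at import time, so set the
# variable before importing it.
os.environ.setdefault('hf_token', '<your-hf-token>')

from configs import dataset_info, model_info


def sample_texts(info, n=5):
    """Pull a few example texts from one dataset_info entry.

    Only plain column names are handled here; a composite spec such as
    'subject+predicate' would need dataset-specific handling.
    """
    ds = load_dataset(info['hf_repo'], split='train')
    if 'filter' in info:  # e.g. keep only label == 1 rows for counterfact
        ds = ds.filter(info['filter'])
    return [row[info['text_col']] for row in ds.select(range(n))]


def build_prompts(model_name, user_prompt):
    """Fill both prompt templates for one model_info entry.

    '[X]' is deliberately left untouched: presumably it marks where the
    inspected representation is substituted later in the pipeline
    (an assumption, not something this diff states).
    """
    cfg = model_info[model_name]
    return (cfg['original_prompt_template'].format(prompt=user_prompt),
            cfg['interpretation_prompt_template'].format(prompt=user_prompt))


questions = sample_texts(dataset_info[0])  # Commonsense questions
orig, interp = build_prompts('Mistral-7B Instruct', questions[0])
print(orig)    # <s><question text>
print(interp)  # <s>[INST] [X] [/INST] <question text>

Note that both active entries keep device_map='cpu' and leave load_in_8bit commented out, so any loader built on model_info would run unquantized on CPU unless those fields are changed.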