dar-tau committed on
Commit
6a634a2
·
verified ·
1 Parent(s): 529784a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -12,7 +12,8 @@ from interpret import InterpretationPrompt
12
  MAX_PROMPT_TOKENS = 60
13
 
14
  ## info
15
- dataset_info = [{'name': 'Commonsense', 'hf_repo': 'tau/commonsense_qa', 'text_col': 'question'},
 
16
  {'name': 'Factual Recall', 'hf_repo': 'azhx/counterfact-filtered-gptj6b', 'text_col': 'subject+predicate',
17
  'filter': lambda x: x['label'] == 1},
18
  {'name': 'Physical Understanding', 'hf_repo': 'piqa', 'text_col': 'goal'},
@@ -20,7 +21,6 @@ dataset_info = [{'name': 'Commonsense', 'hf_repo': 'tau/commonsense_qa', 'text_c
20
  ]
21
 
22
 
23
-
24
  model_info = {
25
  'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', device_map='cpu', token=os.environ['hf_token'],
26
  original_prompt_template='<s>[INST] {prompt} [/INST]',
 
12
  MAX_PROMPT_TOKENS = 60
13
 
14
  ## info
15
+ dataset_info = [
16
+ {'name': 'Commonsense', 'hf_repo': 'tau/commonsense_qa', 'text_col': 'question'},
17
  {'name': 'Factual Recall', 'hf_repo': 'azhx/counterfact-filtered-gptj6b', 'text_col': 'subject+predicate',
18
  'filter': lambda x: x['label'] == 1},
19
  {'name': 'Physical Understanding', 'hf_repo': 'piqa', 'text_col': 'goal'},
 
21
  ]
22
 
23
 
 
24
  model_info = {
25
  'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', device_map='cpu', token=os.environ['hf_token'],
26
  original_prompt_template='<s>[INST] {prompt} [/INST]',