dar-tau committed on
Commit b76e9de · verified · 1 Parent(s): f8fba1a

Update configs.py

Files changed (1)
  1. configs.py +1 -0
configs.py CHANGED
@@ -22,6 +22,7 @@ model_info = {
                        layers_format=llama_layers_format),
     'LLAMA2-13B': dict(model_path='meta-llama/Llama-2-13b-chat-hf',
                        token=os.environ['hf_token'], torch_dtype=torch.float16,
+                       wait_with_hidden_states=True,
                        # device_map='auto', max_memory={0: "15GB", 1: "30GB"}, dont_cuda=True, # load_in_8bit=True,
                        original_prompt_template='<s>{prompt}',
                        interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
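
For context, a minimal sketch of how the 'LLAMA2-13B' entry reads after this commit. The value of llama_layers_format, the closing of the dict, and how wait_with_hidden_states is consumed downstream are assumptions taken from the diff context, not shown in this commit.

import os
import torch

# Assumed placeholder; the real llama_layers_format is defined elsewhere in configs.py.
llama_layers_format = 'model.layers.{k}'

model_info = {
    # ... other entries ...
    'LLAMA2-13B': dict(model_path='meta-llama/Llama-2-13b-chat-hf',
                       token=os.environ['hf_token'],  # requires the hf_token env var to be set
                       torch_dtype=torch.float16,
                       wait_with_hidden_states=True,  # flag added in this commit
                       # device_map='auto', max_memory={0: "15GB", 1: "30GB"}, dont_cuda=True, # load_in_8bit=True,
                       original_prompt_template='<s>{prompt}',
                       interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
                       # presumably closed with layers_format=llama_layers_format,
                       # as in the preceding entry shown in the diff context
                       layers_format=llama_layers_format),
}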