mamkkl committed · Commit 7c4de1b · verified · 1 Parent(s): f7f320d

Update app.py

Files changed (1): app.py (+5 -5)
app.py CHANGED
@@ -13,19 +13,19 @@ from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope
 replace_llama_rope_with_scaled_rope()
 base_model = "Neko-Institute-of-Science/LLaMA-65B-HF"
 lora_weights = LoraConfig(
-    auto_mapping=null,
+    auto_mapping=none,
     base_model_name_or_path="Neko-Institute-of-Science/LLaMA-65B-HF",
     bias=none,
     fan_in_fan_out=false,
     inference_mode=true,
     init_lora_weights=true,
-    layers_pattern=null,
+    layers_pattern=none,
-    layers_to_transform=null,
+    layers_to_transform=none,
     lora_alpha=16,
     lora_dropout=0.05,
-    modules_to_save=null,
+    modules_to_save=none,
     peft_type="LORA",
-    revision=null,
+    revision=none,
     target_modules=["q_proj","k_proj","v_proj","o_proj","gate_proj","up_proj","down_proj"],
     task_type="CAUSAL_LM",
 )
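Note that the committed values are still not valid Python literals: null, none, true, and false are not Python keywords, so this block would raise a NameError when app.py runs. Below is a minimal sketch, not the author's exact file, of the same configuration written with valid Python literals. It assumes LoraConfig comes from the peft library and that the keyword names copied from the diff (including auto_mapping and peft_type) are accepted by the installed peft version; bias is assumed to take the string "none" rather than a bare identifier.

# Minimal sketch under the assumptions stated above; not the committed code.
from peft import LoraConfig

base_model = "Neko-Institute-of-Science/LLaMA-65B-HF"

lora_weights = LoraConfig(
    auto_mapping=None,             # Python's None, not the bare name `none`
    base_model_name_or_path=base_model,
    bias="none",                   # peft expects a string: "none", "all", or "lora_only"
    fan_in_fan_out=False,          # Python booleans are capitalized
    inference_mode=True,
    init_lora_weights=True,
    layers_pattern=None,
    layers_to_transform=None,
    lora_alpha=16,
    lora_dropout=0.05,
    modules_to_save=None,
    peft_type="LORA",
    revision=None,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)

In practice, an adapter with these settings would more commonly be loaded via peft's PeftModel.from_pretrained on top of the base model rather than by constructing the config by hand, but that choice is outside the scope of this commit.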