mamkkl committed · verified · Commit d3f094d · Parent(s): 7c4de1b

Update app.py

Files changed (1):
  1. app.py +9 -9
app.py CHANGED
@@ -13,19 +13,19 @@ from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope
 replace_llama_rope_with_scaled_rope()
 base_model = "Neko-Institute-of-Science/LLaMA-65B-HF"
 lora_weights = LoraConfig(
-    auto_mapping=none,
+    auto_mapping=None,
     base_model_name_or_path="Neko-Institute-of-Science/LLaMA-65B-HF",
-    bias=none,
-    fan_in_fan_out=false,
-    inference_mode=true,
-    init_lora_weights=true,
-    layers_pattern=none,
-    layers_to_transform=none,
+    bias=None,
+    fan_in_fan_out=False,
+    inference_mode=True,
+    init_lora_weights=True,
+    layers_pattern=None,
+    layers_to_transform=None,
     lora_alpha=16,
     lora_dropout=0.05,
-    modules_to_save=none,
+    modules_to_save=None,
     peft_type="LORA",
-    revision=none,
+    revision=None,
     target_modules=["q_proj","k_proj","v_proj","o_proj","gate_proj","up_proj","down_proj"],
     task_type="CAUSAL_LM",
 )
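
For reference, once the literals are capitalized the configuration constructs as plain Python. Below is a minimal sketch assuming peft's LoraConfig API; the fields the diff sets to None match PEFT's defaults and are omitted here, and bias is shown as the string "none" because PEFT documents that field as "none" / "all" / "lora_only" rather than a Python None:

# Minimal sketch of the corrected config (not the app's full code).
# Lowercase none/false/true are NameErrors in Python; the commit swaps in
# the proper None/False/True literals.
from peft import LoraConfig

lora_weights = LoraConfig(
    base_model_name_or_path="Neko-Institute-of-Science/LLaMA-65B-HF",
    bias="none",  # PEFT expects a string here, not a Python None
    fan_in_fan_out=False,
    inference_mode=True,
    init_lora_weights=True,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)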