Update app.py
app.py
CHANGED
@@ -13,19 +13,19 @@ from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope
 replace_llama_rope_with_scaled_rope()
 base_model = "Neko-Institute-of-Science/LLaMA-65B-HF"
 lora_weights = LoraConfig(
-    auto_mapping=
+    auto_mapping=None,
     base_model_name_or_path="Neko-Institute-of-Science/LLaMA-65B-HF",
-    bias=
+    bias=None,
-    fan_in_fan_out=
+    fan_in_fan_out=False,
-    inference_mode=
+    inference_mode=True,
-    init_lora_weights=
+    init_lora_weights=True,
-    layers_pattern=
+    layers_pattern=None,
-    layers_to_transform=
+    layers_to_transform=None,
     lora_alpha=16,
     lora_dropout=0.05,
-    modules_to_save=
+    modules_to_save=None,
     peft_type="LORA",
-    revision=
+    revision=None,
     target_modules=["q_proj","k_proj","v_proj","o_proj","gate_proj","up_proj","down_proj"],
     task_type="CAUSAL_LM",
 )
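For context, below is a minimal sketch (not part of this commit) of how a LoraConfig like the one above is typically applied to the base model with Hugging Face PEFT. The rank r and the get_peft_model call are assumptions, since neither appears in the diff; if the Space loads already-trained adapter weights, PeftModel.from_pretrained(model, adapter_id) would be the usual call instead. Note that PEFT's LoraConfig expects bias as the string "none" rather than the Python value None, so the committed bias=None may need a follow-up fix.

# Minimal sketch, assuming Hugging Face PEFT and transformers; not part of this commit.
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base_model = "Neko-Institute-of-Science/LLaMA-65B-HF"

lora_config = LoraConfig(
    r=8,                    # assumption: rank is not shown in the diff (8 is PEFT's default)
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    bias="none",            # PEFT expects the string "none", not None
    inference_mode=True,    # freezes adapter weights; suitable for a demo app
    task_type="CAUSAL_LM",
)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",      # shard the 65B checkpoint across available devices
)
model = get_peft_model(model, lora_config)  # wrap the base model with LoRA adapter layers

Targeting all seven projection matrices (attention plus MLP), as the commit does, is a common choice for LLaMA-family models when the adapter needs more capacity than attention-only LoRA provides.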