Update app.py
app.py CHANGED
@@ -11,13 +11,7 @@ max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 
-model, tokenizer = AutoModelForCausalLM.from_pretrained(
-    model_name = "Qwen/Qwen2-1.5B",
-    max_seq_length = max_seq_length,
-    dtype = dtype,
-    load_in_4bit = load_in_4bit
-)
-
+model, tokenizer = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-1.5B", max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit)
 model = PeftModel.from_pretrained(model, "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS")
 
 def generate_text(prompt):
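For context, the keyword set in this call (tuple unpacking plus max_seq_length / dtype / load_in_4bit) looks like Unsloth's FastLanguageModel.from_pretrained signature; the stock transformers AutoModelForCausalLM.from_pretrained returns only a model, with the tokenizer loaded separately. Below is a minimal, hypothetical sketch, not the Space's actual code, of loading the same base model in 4-bit and attaching the same adapter with the standard transformers + peft APIs; the BitsAndBytesConfig settings and device_map="auto" are assumptions, not taken from this commit.

# Hypothetical sketch: load the base model in 4-bit and attach the LoRA adapter
# using the standard transformers + peft APIs (model and adapter IDs from the diff).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "Qwen/Qwen2-1.5B"
adapter_id = "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS"

# 4-bit quantization config, mirroring load_in_4bit = True above (settings are assumed).
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    device_map="auto",  # requires accelerate; an assumption, not from the commit
)
model = PeftModel.from_pretrained(model, adapter_id)  # attach the adapter as in the diff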