desert committed · Commit fa564e5 · 1 Parent(s): 3398624
del
app.py CHANGED
@@ -6,7 +6,7 @@ import torch
 # Load your model and tokenizer (make sure to adjust the path to where your model is stored)
 max_seq_length = 2048 # Adjust as necessary
 load_in_4bit = True # Enable 4-bit quantization for reduced memory usage
-model_path = "
+model_path = "llama_lora_model_1" # Path to your custom model
 
 # Load the model and tokenizer
 model, tokenizer = FastLanguageModel.from_pretrained(
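
For reference, a minimal sketch of how these settings are typically passed to unsloth's FastLanguageModel.from_pretrained, whose argument list the diff cuts off at line 12. This is not taken from app.py; the keyword arguments and the dtype choice below are assumptions based on common unsloth usage.

# Sketch only (assumed arguments), not the contents of app.py
from unsloth import FastLanguageModel

max_seq_length = 2048                # Adjust as necessary
load_in_4bit = True                  # Enable 4-bit quantization for reduced memory usage
model_path = "llama_lora_model_1"    # Path to your custom model

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_path,           # local model directory or Hub repo id
    max_seq_length=max_seq_length,
    dtype=None,                      # let unsloth pick a suitable dtype for the GPU
    load_in_4bit=load_in_4bit,
)

If the app only serves inference, unsloth's FastLanguageModel.for_inference(model) can additionally be called to enable its faster generation path.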