Update README.md
Browse files
README.md
CHANGED
```diff
@@ -18,7 +18,7 @@ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False
 
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "
+    model_name = "M-Chimiste/Llama-3-8B-prime-graph-exp-1_merged",
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,
```