M-Chimiste committed · Commit 616b453 · verified · 1 Parent(s): 0f0f7fb

Update README.md

Files changed (1)
README.md +1 -1
README.md CHANGED
@@ -18,7 +18,7 @@ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False
 
 
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "Hermes-Llama-3-8B-prime-graph-exp-1_merged",
+    model_name = "M-Chimiste/Llama-3-8B-prime-graph-exp-1_merged",
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,
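
For context, here is a minimal sketch of the README snippet as it reads after this change, using Unsloth's `FastLanguageModel.from_pretrained`. The `max_seq_length` and `dtype` values are assumptions (they are not shown in the diff); only the `load_in_4bit` comment and the corrected model name come from the hunk above.

```python
from unsloth import FastLanguageModel

max_seq_length = 2048   # assumed value; not shown in the diff
dtype = None            # assumed; None lets Unsloth pick float16/bfloat16 automatically
load_in_4bit = True     # Use 4bit quantization to reduce memory usage. Can be False

# Load the merged model under its full Hub path, as corrected in this commit.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "M-Chimiste/Llama-3-8B-prime-graph-exp-1_merged",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
```

The change itself only prefixes the model name with the `M-Chimiste/` namespace so the identifier resolves on the Hugging Face Hub instead of being treated as a local path.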