suhaifLLM committed (verified)
Commit ee30c20 · Parent(s): 73c22e3

Update app.py

Files changed (1): app.py (+1 -1)
app.py CHANGED
@@ -9,7 +9,7 @@ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be Fals
 
 # Initialize the model
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name="suhaif/unsloth-llama-3-8b-4bit",
+    model_name="suhaifLLM/unsloth-llama3-8b-instruct-4bit",
     max_seq_length=max_seq_length,
     dtype=dtype,
     load_in_4bit=load_in_4bit
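
For context, the surrounding app.py presumably loads the model roughly as in the minimal sketch below, using Unsloth's FastLanguageModel API. The concrete values for max_seq_length and dtype, and the for_inference call, are assumptions for illustration; only the model_name, the parameter names, and the 4-bit comment are taken from the diff itself.

# Minimal sketch of the model initialization referenced by this commit.
# max_seq_length, dtype, and the inference-mode call are assumed values,
# not taken from the commit.
from unsloth import FastLanguageModel

max_seq_length = 2048  # assumed context length
dtype = None           # None lets Unsloth auto-detect float16/bfloat16
load_in_4bit = True    # Use 4bit quantization to reduce memory usage.

# Initialize the model from the repository referenced in this commit
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="suhaifLLM/unsloth-llama3-8b-instruct-4bit",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)

# Enable Unsloth's faster inference mode before generating
FastLanguageModel.for_inference(model)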