RishabhBhardwaj committed
Commit 45a4564 · 1 Parent(s): 7998472
Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -16,7 +16,7 @@ Answer: [/INST]
 # Load the model and tokenizer
 @st.cache_resource
 def load_model():
-    model_name = "meta-llama/Llama-Guard-3-8B"
+    model_name = "meta-llama/Llama-Guard-3-8B-INT8"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name, load_in_4bit=True)
     return tokenizer, model
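
For context, below is a minimal sketch of how the cached loader and the moderation prompt might be wired together in app.py. Only load_model() appears in the diff; the text area, button, prompt formatting, and generation call are assumptions about the surrounding code, not part of this commit.

# Minimal sketch (assumed usage; only load_model() is shown in the diff above).
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource
def load_model():
    # Cached so Streamlit loads the weights once per session, not on every rerun.
    model_name = "meta-llama/Llama-Guard-3-8B-INT8"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # load_in_4bit is kept as in the original; newer transformers versions
    # prefer passing a BitsAndBytesConfig via quantization_config instead.
    model = AutoModelForCausalLM.from_pretrained(model_name, load_in_4bit=True)
    return tokenizer, model

tokenizer, model = load_model()

user_input = st.text_area("Message to check")  # hypothetical widget label
if st.button("Run Llama Guard") and user_input:
    # The real prompt template sits above the diff hunk (it ends with "Answer: [/INST]");
    # this simplified version is an assumption for illustration.
    prompt = f"[INST] {user_input}\nAnswer: [/INST]"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=64)
    # Decode only the newly generated tokens (the model's safe/unsafe verdict).
    verdict = tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    st.write(verdict)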