Spaces:
Sleeping
Sleeping
Add Hugging Face API token to safety model and tokenizer loading
Browse files
helper.py
CHANGED
@@ -892,15 +892,18 @@ def init_safety_model(model_name, force_cpu=False):
|
|
892 |
# model_id = "meta-llama/Llama-Guard-3-8B"
|
893 |
# model_id = "meta-llama/Llama-Guard-3-1B"
|
894 |
|
|
|
|
|
895 |
safety_model = AutoModelForCausalLM.from_pretrained(
|
896 |
model_name,
|
|
|
897 |
torch_dtype=MODEL_CONFIG["safety_model"]["dtype"],
|
898 |
use_cache=True,
|
899 |
device_map="auto",
|
900 |
)
|
901 |
safety_model.config.use_cache = True
|
902 |
|
903 |
-
safety_tokenizer = AutoTokenizer.from_pretrained(model_name)
|
904 |
# Set pad token explicitly
|
905 |
safety_tokenizer.pad_token = safety_tokenizer.eos_token
|
906 |
|
|
|
892 |
# model_id = "meta-llama/Llama-Guard-3-8B"
|
893 |
# model_id = "meta-llama/Llama-Guard-3-1B"
|
894 |
|
895 |
+
api_key = get_huggingface_api_key()
|
896 |
+
|
897 |
safety_model = AutoModelForCausalLM.from_pretrained(
|
898 |
model_name,
|
899 |
+
token=api_key,
|
900 |
torch_dtype=MODEL_CONFIG["safety_model"]["dtype"],
|
901 |
use_cache=True,
|
902 |
device_map="auto",
|
903 |
)
|
904 |
safety_model.config.use_cache = True
|
905 |
|
906 |
+
safety_tokenizer = AutoTokenizer.from_pretrained(model_name, token=api_key)
|
907 |
# Set pad token explicitly
|
908 |
safety_tokenizer.pad_token = safety_tokenizer.eos_token
|
909 |
|