dharmendra committed on
Commit
a23c36a
·
1 Parent(s): 0242952

Fixed gated repo error by passing token to tokenizer

Browse files
Files changed (1) hide show
  1. app.py +3 -9
app.py CHANGED
@@ -18,26 +18,20 @@ app = FastAPI()
18
  # Get the Hugging Face API token from environment variables (BEST PRACTICE)
19
  HUGGINGFACEHUB_API_TOKEN = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
20
 
21
- # --- DEBUGGING LINE ADDED ---
22
- if HUGGINGFACEHUB_API_TOKEN:
23
- print(f"HUGGINGFACEHUB_API_TOKEN found: {HUGGINGFACEHUB_API_TOKEN[:5]}...{HUGGINGFACEHUB_API_TOKEN[-5:]}")
24
- else:
25
- print("HUGGINGFACEHUB_API_TOKEN is NOT set in environment variables.")
26
- # --- END DEBUGGING LINE ---
27
-
28
  if HUGGINGFACEHUB_API_TOKEN is None:
29
  raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable not set.")
30
 
31
  # --- UPDATED: Use Mistral 7B Instruct v0.3 model ---
32
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
33
 
34
- tokenizer = AutoTokenizer.from_pretrained(model_id)
 
35
  model = AutoModelForCausalLM.from_pretrained(
36
  model_id,
37
  device_map="auto",
38
  torch_dtype=torch.bfloat16,
39
  trust_remote_code=True,
40
- token=HUGGINGFACEHUB_API_TOKEN # Ensure the token is passed here
41
  )
42
 
43
  if torch.backends.mps.is_available():
 
18
  # Get the Hugging Face API token from environment variables (BEST PRACTICE)
19
  HUGGINGFACEHUB_API_TOKEN = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
20
 
 
 
 
 
 
 
 
21
  if HUGGINGFACEHUB_API_TOKEN is None:
22
  raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable not set.")
23
 
24
  # --- UPDATED: Use Mistral 7B Instruct v0.3 model ---
25
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
26
 
27
+ # --- IMPORTANT FIX: Pass token to tokenizer as well ---
28
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token=HUGGINGFACEHUB_API_TOKEN)
29
  model = AutoModelForCausalLM.from_pretrained(
30
  model_id,
31
  device_map="auto",
32
  torch_dtype=torch.bfloat16,
33
  trust_remote_code=True,
34
+ token=HUGGINGFACEHUB_API_TOKEN # Token is already passed here
35
  )
36
 
37
  if torch.backends.mps.is_available():