Cylanoid committed
Commit 89b3781 · verified · 1 Parent(s): 8e1a378

Update app.py

Files changed (1)
app.py +10 -6
app.py CHANGED
@@ -14,16 +14,20 @@ import sentencepiece
 import huggingface_hub
 from transformers import TrainingArguments, Trainer
 
+# Debug: Print all environment variables to verify 'LLama' is present
+print("Environment variables:", dict(os.environ))
+
 # Retrieve the token from Hugging Face Space secrets
-llama = os.getenv("llama:levi put token here")  # Token expected as env variable 'llama'
-if not llama:
-    raise ValueError("llama token not found in environment variables. Please set it in Hugging Face Space secrets under 'Settings' > 'Secrets' as 'llama'.")
+# Token placement: LLama:levi put token here
+LLama = os.getenv("LLama")  # Retrieves the value of the 'LLama' environment variable
+if not LLama:
+    raise ValueError("LLama token not found in environment variables. Please set it in Hugging Face Space secrets under 'Settings' > 'Secrets' as 'LLama'.")
 
 # Debug: Print the token to verify it's being read (remove this in production)
-print(f"Retrieved llama token: {llama[:5]}... (first 5 chars for security)")
+print(f"Retrieved LLama token: {LLama[:5]}... (first 5 chars for security)")
 
 # Authenticate with Hugging Face
-huggingface_hub.login(token=llama)
+huggingface_hub.login(token=LLama)
 
 # Model setup
 MODEL_ID = "meta-llama/Llama-2-7b-hf"
@@ -146,7 +150,7 @@ def train_ui(files):
         return "Training completed! Model saved to ./fine_tuned_llama_healthcare"
 
     except Exception as e:
-        return f"Error: {str(e)}. Please check file format, dependencies, or the llama token."
+        return f"Error: {str(e)}. Please check file format, dependencies, or the LLama token."
 
 # Gradio UI
 with gr.Blocks(title="Healthcare Fraud Detection Fine-Tuning") as demo:
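Note on the pattern above: the new code reads the access token from a Space secret exposed as an environment variable and passes it to huggingface_hub.login. A minimal sketch of that flow without the debug prints (the secret name "LLama" comes from this commit; the HF_TOKEN fallback and the error message wording are assumptions, not part of app.py):

import os
import huggingface_hub

# Hugging Face Space secrets (Settings > Secrets) are exposed to the app
# as environment variables with the same name, here "LLama".
token = os.getenv("LLama") or os.getenv("HF_TOKEN")  # HF_TOKEN fallback is an assumed convenience
if not token:
    raise ValueError("No Hugging Face token found. Set the 'LLama' secret in the Space settings.")

# Authenticate directly; avoid echoing the token or os.environ, since
# anything printed here ends up in the Space logs.
huggingface_hub.login(token=token)

As the commit's own comment notes, the debug prints (including the dict(os.environ) dump, which exposes every secret in the Space) should be removed once the variable is confirmed.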