Reyad-Ahmmed committed (verified)
Commit b626304 · 1 Parent(s): 36150aa

Update app.py

Files changed (1):
  1. app.py +4 -38
app.py CHANGED
@@ -97,19 +97,8 @@ if (runModel=='1'):
     test_dataset = IntentDataset(test_encodings, list(test_df['label']))
 
 
-    # Your repository name
-    repo_name = "Reyad-Ahmmed/hf-data-timeframe"
 
 
-    api_token = os.getenv("HF_API_TOKEN") # Retrieve the API token from environment variable
-
-    if not api_token:
-        raise ValueError("API token not found. Please set the HF_API_TOKEN environment variable.")
-
-    # Create repository (if not already created)
-    api = HfApi()
-    create_repo(repo_id=repo_name, token=api_token, exist_ok=True)
-
 
     # Create an instance of the custom loss function
     training_args = TrainingArguments(
@@ -192,35 +181,12 @@ if (runModel=='1'):
     evaluate_and_report_errors(model,train_dataloader, tokenizer)
 
     # Save the model and tokenizer
-    #model.save_pretrained('./' + modelNameToUse + '_model')
-    #tokenizer.save_pretrained('./' + modelNameToUse + '_tokenizer')
+    model.save_pretrained('./' + modelNameToUse + '_model')
+    tokenizer.save_pretrained('./' + modelNameToUse + '_tokenizer')
 
-    # Save the model and tokenizer locally
-    local_model_path = "./data-timeframe_model"
-    local_tokenizer_path = "./data-timeframe_tokenizer"
-
-    # Ensure the directories exist
-    os.makedirs(local_model_path, exist_ok=True)
-    os.makedirs(local_tokenizer_path, exist_ok=True)
 
-    model.save_pretrained(local_model_path)
-    tokenizer.save_pretrained(local_tokenizer_path)
-
-    # Upload the model and tokenizer to the Hugging Face repository
-    upload_folder(
-        folder_path=local_model_path,
-        path_in_repo="data-timeframe_model",
-        repo_id=repo_name,
-        token=api_token,
-        commit_message="Update fine-tuned model"
-    )
-    upload_folder(
-        folder_path=local_tokenizer_path,
-        path_in_repo="data-timeframe_tokenizer",
-        repo_id=repo_name,
-        token=api_token,
-        commit_message="Update fine-tuned tokenizer"
-    )
+
+
 
 
 else:
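
For reference, the deleted lines followed the standard huggingface_hub publish pattern: read a token from the environment, create the repo idempotently, then push local folders. A minimal self-contained sketch of that removed pattern (repo name and paths taken from the removed lines; the training code that produces the saved folder is assumed to have run already):

import os
from huggingface_hub import create_repo, upload_folder

repo_name = "Reyad-Ahmmed/hf-data-timeframe"

# Read the Hub token from the environment so it never lands in source control
api_token = os.getenv("HF_API_TOKEN")
if not api_token:
    raise ValueError("API token not found. Please set the HF_API_TOKEN environment variable.")

# Create the repository on first run; exist_ok=True makes reruns a no-op
create_repo(repo_id=repo_name, token=api_token, exist_ok=True)

# Push a locally saved model folder into a subdirectory of the repo
upload_folder(
    folder_path="./data-timeframe_model",
    path_in_repo="data-timeframe_model",
    repo_id=repo_name,
    token=api_token,
    commit_message="Update fine-tuned model",
)

Note that the removed code also instantiated api = HfApi() but never used it; the module-level create_repo and upload_folder helpers are sufficient on their own.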
 
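After this commit the script only writes the fine-tuned artifacts to local folders. A minimal sketch of the resulting save/reload round trip, assuming modelNameToUse is "data-timeframe" (matching the old local paths) and using a small public checkpoint as a stand-in for the fine-tuned model and tokenizer produced by the training run:

from transformers import AutoModelForSequenceClassification, AutoTokenizer

modelNameToUse = "data-timeframe"  # assumed value, matching the removed local paths

# Stand-ins for the fine-tuned objects produced by the training loop above;
# any small checkpoint works for demonstrating the round trip.
model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

# Save locally, as the updated app.py now does
model.save_pretrained('./' + modelNameToUse + '_model')
tokenizer.save_pretrained('./' + modelNameToUse + '_tokenizer')

# Reload from disk later without touching the Hub
model = AutoModelForSequenceClassification.from_pretrained('./' + modelNameToUse + '_model')
tokenizer = AutoTokenizer.from_pretrained('./' + modelNameToUse + '_tokenizer')

save_pretrained writes the weights plus config (and tokenizer files) into the target directory, so from_pretrained can later consume the same path with no network access.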