Reyad-Ahmmed committed on
Commit
5850bee
·
verified ·
1 Parent(s): e3114e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -104,7 +104,7 @@ if (runModel=='1'):
104
  # Create an instance of the custom loss function
105
  training_args = TrainingArguments(
106
  output_dir='./results_' + modelNameToUse,
107
- num_train_epochs=5,
108
  per_device_train_batch_size=8,
109
  per_device_eval_batch_size=8,
110
  warmup_steps=500,
@@ -112,8 +112,7 @@ if (runModel=='1'):
112
  logging_dir='./logs_' + modelNameToUse,
113
  logging_steps=10,
114
  evaluation_strategy="epoch", # Evaluation strategy is 'epoch'
115
- save_strategy="epoch", # Save strategy should also be 'epoch'
116
- load_best_model_at_end=True, # Load the best model based on evaluation
117
  )
118
 
119
  trainer = Trainer(
@@ -133,7 +132,6 @@ if (runModel=='1'):
133
  0: "lastmonth",
134
  1: "nextweek",
135
  2: "sevendays"
136
-
137
  }
138
 
139
  def evaluate_and_report_errors(model, dataloader, tokenizer):
@@ -227,15 +225,17 @@ if (runModel=='1'):
227
  path_in_repo="data-timeframe_model",
228
  repo_id=repo_name,
229
  token=api_token,
230
- commit_message="Update fine-tuned model"
231
  )
 
232
  upload_folder(
233
  folder_path=tokenizer_path,
234
  path_in_repo="data-timeframe_tokenizer",
235
  repo_id=repo_name,
236
  token=api_token,
237
- commit_message="Update fine-tuned tokenizer"
238
  )
 
239
 
240
  else:
241
  print('Load Pre-trained')
 
104
  # Create an instance of the custom loss function
105
  training_args = TrainingArguments(
106
  output_dir='./results_' + modelNameToUse,
107
+ num_train_epochs=10,
108
  per_device_train_batch_size=8,
109
  per_device_eval_batch_size=8,
110
  warmup_steps=500,
 
112
  logging_dir='./logs_' + modelNameToUse,
113
  logging_steps=10,
114
  evaluation_strategy="epoch", # Evaluation strategy is 'epoch'
115
+
 
116
  )
117
 
118
  trainer = Trainer(
 
132
  0: "lastmonth",
133
  1: "nextweek",
134
  2: "sevendays"
 
135
  }
136
 
137
  def evaluate_and_report_errors(model, dataloader, tokenizer):
 
225
  path_in_repo="data-timeframe_model",
226
  repo_id=repo_name,
227
  token=api_token,
228
+ commit_message="Update fine-tuned model for test"
229
  )
230
+
231
  upload_folder(
232
  folder_path=tokenizer_path,
233
  path_in_repo="data-timeframe_tokenizer",
234
  repo_id=repo_name,
235
  token=api_token,
236
+ commit_message="Update fine-tuned tokenizer for test"
237
  )
238
+ print("tokenizer folder: ", tokenizer_path)
239
 
240
  else:
241
  print('Load Pre-trained')