Kevin Fink committed
Commit 94704a5 · 1 Parent(s): d86b87f
Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -25,7 +25,7 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
     model = get_peft_model(model, lora_config)
     tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-    max_length = 256
+    max_length = 64
 
     # Tokenize the dataset
     def tokenize_function(examples):
@@ -63,7 +63,7 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
         per_device_eval_batch_size=int(batch_size),
         num_train_epochs=int(num_epochs),
         weight_decay=0.01,
-        gradient_accumulation_steps=int(grad),
+        #gradient_accumulation_steps=int(grad),
         load_best_model_at_end=True,
         metric_for_best_model="accuracy",
         greater_is_better=True,
@@ -73,7 +73,7 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
         hub_model_id=hub_id.strip(),
         fp16=True,
         #lr_scheduler_type='cosine',
-        save_steps=500,  # Save checkpoint every 500 steps
+        save_steps=200,  # Save checkpoint every 200 steps
         save_total_limit=3,
     )
     # Check if a checkpoint exists and load it
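For context, the max_length change only takes effect inside tokenize_function, whose body falls outside these hunks. A minimal sketch of what such a function typically looks like with a Hugging Face tokenizer, assuming a "text" column (the actual column name and padding strategy in app.py are not shown in this diff):

    def tokenize_function(examples):
        # Truncate/pad each example to max_length tokens; with the new
        # value of 64 this cuts sequence length (and activation memory)
        # to a quarter of the previous 256.
        return tokenizer(
            examples["text"],      # assumed column name, not shown in the diff
            truncation=True,
            padding="max_length",
            max_length=max_length,
        )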
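Commenting out gradient_accumulation_steps makes Trainer fall back to its default of 1, so the effective batch size per optimizer step is simply the per-device batch size times the number of devices. A quick check of that arithmetic (values are illustrative; batch_size is a function argument in app.py, not shown here):

    per_device_train_batch_size = 4   # illustrative value
    gradient_accumulation_steps = 1   # Trainer default once the argument is removed
    num_devices = 1                   # single-GPU assumption

    effective_batch = per_device_train_batch_size * gradient_accumulation_steps * num_devices
    print(effective_batch)            # 4, instead of 4 * int(grad) before this commit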
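The diff ends just before the "# Check if a checkpoint exists and load it" step. A hedged sketch of that step using real transformers APIs (get_last_checkpoint and Trainer.train's resume_from_checkpoint parameter), assuming training_args and trainer are the objects built above; the actual logic in app.py is not shown:

    from transformers.trainer_utils import get_last_checkpoint

    # Returns the newest "checkpoint-*" directory under output_dir, or None.
    last_checkpoint = get_last_checkpoint(training_args.output_dir)

    # With save_steps=200 and save_total_limit=3, only the three most
    # recent checkpoints are kept on disk to resume from.
    trainer.train(resume_from_checkpoint=last_checkpoint)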