zetavg committed · Commit f69a138 · 1 Parent(s): 41468ea
llama_lora/lib/finetune.py CHANGED
@@ -277,10 +277,12 @@ def train(
     model.is_parallelizable = True
     model.model_parallel = True
 
+    # https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.Trainer
     trainer = transformers.Trainer(
         model=model,
         train_dataset=train_data,
         eval_dataset=val_data,
+        # https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.TrainingArguments
         args=transformers.TrainingArguments(
             per_device_train_batch_size=micro_batch_size,
             gradient_accumulation_steps=gradient_accumulation_steps,
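
For reference, the two doc links added above point at the API being configured in this hunk. Below is a minimal sketch of the same Trainer / TrainingArguments wiring, not the repo's actual code; the function name build_trainer, the "./output" path, and the default batch values are illustrative placeholders.

# Minimal sketch of the Trainer / TrainingArguments pairing described in the
# linked docs. build_trainer, "./output", and the defaults are placeholders.
import transformers

def build_trainer(model, train_data, val_data,
                  micro_batch_size=4, gradient_accumulation_steps=8):
    # TrainingArguments bundles the hyperparameters; Trainer runs the loop.
    training_args = transformers.TrainingArguments(
        output_dir="./output",  # placeholder; required by older transformers versions
        per_device_train_batch_size=micro_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
    )
    return transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=training_args,
    )
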
llama_lora/ui/finetune_ui.py CHANGED
@@ -545,8 +545,6 @@ Train data (first 10):
     result_message = f"Training ended:\n{str(train_output)}\n\nLogs:\n{logs_str}"
     print(result_message)
 
-    del base_model
-    del tokenizer
     clear_cache()
 
     return result_message
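
With the del base_model and del tokenizer lines removed, the loaded model and tokenizer now outlive the training run (so they can be reused) and memory is reclaimed only through clear_cache(). The helper's body is not part of this diff; a plausible sketch, assuming it wraps Python garbage collection plus the CUDA allocator cache:

# Plausible sketch of the clear_cache() helper called above; the actual
# implementation lives elsewhere in the repo and may differ.
import gc

def clear_cache():
    gc.collect()  # drop unreachable Python objects first
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # release cached GPU allocator blocks
    except ImportError:
        pass  # torch not installed; nothing GPU-side to clear
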