Kevin Fink committed on
Commit 11677cd · 1 Parent(s): 45a9555
Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -6,7 +6,7 @@ from datasets import load_dataset, concatenate_datasets, load_from_disk
 import traceback
 from sklearn.metrics import accuracy_score
 import numpy as np
-#import torch
+import torch
 
 import os
 from huggingface_hub import login
@@ -17,7 +17,7 @@ os.environ['HF_HOME'] = '/data/.huggingface'
 @spaces.GPU(duration=120)
 def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
     try:
-        #torch.cuda.empty_cache()
+        torch.cuda.empty_cache()
         def compute_metrics(eval_pred):
             logits, labels = eval_pred
             predictions = np.argmax(logits, axis=1)
@@ -35,7 +35,7 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
         )
 
         # Load the model and tokenizer
-        model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip(), num_labels=2)
+        model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip(), num_labels=2, force_download=True)
         model.gradient_checkpointing_enable()
         #model = get_peft_model(model, lora_config)
 
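For context, the three changed lines work together: the torch import and cache clear are re-enabled so GPU memory is released before a model is loaded, and force_download=True makes from_pretrained fetch a fresh copy of the checkpoint instead of reusing the local cache. Below is a minimal sketch of that pattern; the load_fresh_model wrapper and the google/flan-t5-small checkpoint name are illustrative placeholders, not part of this Space's code.

    # Minimal sketch of the calls this commit enables, not the Space's full app.py.
    import torch
    from transformers import AutoModelForSeq2SeqLM

    def load_fresh_model(model_name: str):
        # Release cached GPU memory left over from a previous run before loading.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        # force_download=True re-fetches the weights rather than reusing a possibly
        # stale or corrupted copy in the local Hugging Face cache.
        return AutoModelForSeq2SeqLM.from_pretrained(
            model_name.strip(), num_labels=2, force_download=True
        )

    # Placeholder checkpoint for illustration only.
    model = load_fresh_model("google/flan-t5-small")
    model.gradient_checkpointing_enable()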