shorecode committed
Commit 5a4cfac · verified · 1 Parent(s): 59203cf

Upload app.py

Files changed (1)
  1. app.py +3 -5
app.py CHANGED
@@ -6,7 +6,6 @@ from datasets import load_dataset, concatenate_datasets, load_from_disk
 import traceback
 from sklearn.metrics import accuracy_score
 import numpy as np
-import torch
 
 import os
 from huggingface_hub import login
@@ -27,7 +26,6 @@ model = AutoModelForSeq2SeqLM.from_pretrained('google/t5-efficient-tiny', num_la
 @spaces.GPU(duration=120)
 def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
     try:
-        torch.cuda.empty_cache()
         def compute_metrics(eval_pred):
             logits, labels = eval_pred
             predictions = np.argmax(logits, axis=1)
@@ -68,9 +66,9 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
             save_total_limit=3,
         )
         # Check if a checkpoint exists and load it
-        if os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir):
-            print("Loading model from checkpoint...")
-            model = AutoModelForSeq2SeqLM.from_pretrained(training_args.output_dir)
+        # if os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir):
+        #     print("Loading model from checkpoint...")
+        #     model = AutoModelForSeq2SeqLM.from_pretrained(training_args.output_dir)
 
         max_length = 128
         try:
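
For context, the block this commit comments out reloaded the model by hand whenever the output directory already contained files. A different way to get the same resume behaviour is to let the Trainer restore the latest checkpoint itself. The sketch below is a hypothetical helper, not code from app.py; the function name resume_training is illustrative, and it assumes a transformers Trainer (or Seq2SeqTrainer) has already been built elsewhere.

import os
from transformers.trainer_utils import get_last_checkpoint

def resume_training(trainer, output_dir):
    # Hypothetical helper (not part of app.py): resume from the newest
    # "checkpoint-*" folder in output_dir, or start fresh if none exists.
    last_checkpoint = None
    if os.path.isdir(output_dir):
        # get_last_checkpoint returns the most recent checkpoint path, or None.
        last_checkpoint = get_last_checkpoint(output_dir)
    # resume_from_checkpoint=None simply trains from scratch.
    return trainer.train(resume_from_checkpoint=last_checkpoint)

With this pattern the manual AutoModelForSeq2SeqLM.from_pretrained(training_args.output_dir) reload is not needed, because Trainer.train restores the model weights together with the optimizer and scheduler state from the checkpoint.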