Kevin Fink committed on
Commit
a37721f
·
1 Parent(s): 7035d60
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -98,7 +98,7 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
98
  max_length=max_length, # Set to None for dynamic padding
99
  truncation=True,
100
  padding='max_length',
101
- #return_tensors='pt',
102
  )
103
 
104
  # Setup the decoder input IDs (shifted right)
@@ -108,7 +108,7 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
108
  truncation=True,
109
  padding='max_length',
110
  #text_target=examples['target'],
111
- #return_tensors='pt',
112
  )
113
  #labels["input_ids"] = [
114
  # [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
@@ -211,7 +211,7 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
211
  valid_set = dataset['validation'].map(tokenize_function, batched=True)
212
 
213
  print(train_set['labels'])
214
-
215
  print('DONE')
216
 
217
 
 
98
  max_length=max_length, # Set to None for dynamic padding
99
  truncation=True,
100
  padding='max_length',
101
+ return_tensors='pt',
102
  )
103
 
104
  # Setup the decoder input IDs (shifted right)
 
108
  truncation=True,
109
  padding='max_length',
110
  #text_target=examples['target'],
111
+ return_tensors='pt',
112
  )
113
  #labels["input_ids"] = [
114
  # [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
 
211
  valid_set = dataset['validation'].map(tokenize_function, batched=True)
212
 
213
  print(train_set['labels'])
214
+ print(train_set.keys())
215
  print('DONE')
216
 
217