Kevin Fink committed on
Commit
909a043
·
1 Parent(s): a20d3d9
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -244,8 +244,7 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
244
  return 'DONE!'#train_result
245
 
246
  # Define Gradio interface
247
- def predict(text):
248
- model = AutoModelForSeq2SeqLM.from_pretrained('shorecode/t5-efficient-tiny-nh8-summarizer', num_labels=2)
249
  tokenizer = AutoTokenizer.from_pretrained('shorecode/t5-efficient-tiny-nh8-summarizer')
250
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
251
  outputs = model(inputs)
@@ -274,7 +273,7 @@ def run_train(text):
274
  )
275
  #model = get_peft_model(model, lora_config)
276
  #result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
277
- result = predict(text)
278
  return result
279
  # Create Gradio interface
280
  try:
 
244
  return 'DONE!'#train_result
245
 
246
  # Define Gradio interface
247
+ def predict(text, model):
 
248
  tokenizer = AutoTokenizer.from_pretrained('shorecode/t5-efficient-tiny-nh8-summarizer')
249
  inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
250
  outputs = model(inputs)
 
273
  )
274
  #model = get_peft_model(model, lora_config)
275
  #result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
276
+ result = predict(text, model)
277
  return result
278
  # Create Gradio interface
279
  try: