Kevin Fink committed
Commit 5912a26 · 1 Parent(s): d9a6e5a
Files changed (1):
  1. app.py  +10 -9
app.py CHANGED
@@ -13,15 +13,7 @@ from peft import get_peft_model, LoraConfig
 
 os.environ['HF_HOME'] = '/data/.huggingface'
 
-lora_config = LoraConfig(
-    r=16,             # Rank of the low-rank adaptation
-    lora_alpha=32,    # Scaling factor
-    lora_dropout=0.1, # Dropout for LoRA layers
-    bias="none"       # Bias handling
-)
-model = AutoModelForSeq2SeqLM.from_pretrained('google/t5-efficient-tiny', num_labels=2, force_download=True)
-model = get_peft_model(model, lora_config)
-model.gradient_checkpointing_enable()
+
 
 
 def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
@@ -143,6 +135,15 @@ def predict(text):
 
 @spaces.GPU(duration=120)
 def run_train(dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
+    lora_config = LoraConfig(
+        r=16,             # Rank of the low-rank adaptation
+        lora_alpha=32,    # Scaling factor
+        lora_dropout=0.1, # Dropout for LoRA layers
+        bias="none"       # Bias handling
+    )
+    model = AutoModelForSeq2SeqLM.from_pretrained('google/t5-efficient-tiny', num_labels=2, force_download=True)
+    model = get_peft_model(model, lora_config)
+    model.gradient_checkpointing_enable()
     result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
     return result
 # Create Gradio interface
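
For context, here is a minimal, standalone sketch of the model setup that this commit moves from module scope into run_train(). It is not part of the commit: it assumes the same transformers and peft packages app.py already imports, omits the commit's num_labels=2 (unused by a seq2seq LM head) and force_download=True (only forces a re-download), and adds a print_trainable_parameters() call purely to illustrate the effect of the LoRA wrapping.

# Hypothetical reconstruction of the setup now built inside run_train(); illustration only.
from transformers import AutoModelForSeq2SeqLM
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=16,             # Rank of the low-rank adaptation matrices
    lora_alpha=32,    # Scaling factor applied to the LoRA updates
    lora_dropout=0.1, # Dropout on the LoRA layers
    bias="none",      # Leave the base model's bias terms untouched
)

# Load the tiny T5 checkpoint and wrap it with LoRA adapters
# (for T5, peft targets the attention q/v projections by default).
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/t5-efficient-tiny")
model = get_peft_model(base_model, lora_config)

# Trade compute for memory during backprop, as in the commit.
model.gradient_checkpointing_enable()

# Added for illustration: confirm that only the adapter weights are trainable.
model.print_trainable_parameters()

One plausible reading of the change is that building the base model and its LoRA wrapper inside the @spaces.GPU-decorated run_train() gives each training request a fresh, un-tuned model instead of reusing a single module-level instance across runs; the commit itself does not state the motivation.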