cheberle committed on
Commit d3fdb20 · 1 Parent(s): 182622a
Files changed (1)
  1. app.py +3 -7
app.py CHANGED
@@ -5,8 +5,6 @@ from transformers import AutoTokenizer, TrainingArguments, Trainer, AutoModelForCausalLM
 import torch
 import os
 
-# Force CPU-only execution
-os.environ["CUDA_VISIBLE_DEVICES"] = ""
 
 def train_model(file, hf_token):
     try:
@@ -19,7 +17,7 @@ def train_model(file, hf_token):
         tokenizer = AutoTokenizer.from_pretrained(model_name)
         model = AutoModelForCausalLM.from_pretrained(
             model_name,
-            low_cpu_mem_usage=True,
+            low_cpu_mem_usage=True,  # Lower memory usage
             torch_dtype=torch.float32  # Ensure compatibility with CPU
         )
 
@@ -30,9 +28,7 @@ def train_model(file, hf_token):
             output_dir="./results",
             per_device_train_batch_size=1,
             num_train_epochs=1,
-            no_cuda=True,  # Disable GPU
-            use_cpu=True,  # Ensure CPU usage
-            fp16=False  # Disable mixed precision
+            no_cuda=True,  # Explicitly disable GPU
         )
 
         trainer = Trainer(
@@ -59,4 +55,4 @@ demo = gr.Interface(
 )
 
 if __name__ == "__main__":
-    demo.launch(debug=True)  # Removed ssr argument
+    demo.launch(debug=True, share=True)  # Enable public link for easier testing
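Taken together, the commit leaves CPU-only execution to the single no_cuda=True flag and drops the CUDA_VISIBLE_DEVICES override plus the redundant use_cpu and fp16 settings. For reference, a minimal sketch of the resulting model-loading and training-arguments setup, reconstructed from the diff context; the model_name placeholder and the import list shown here are assumptions, since those lines fall outside the hunks above.

```python
# Minimal sketch of the post-commit CPU-only setup (reconstructed from the diff;
# "distilgpt2" is a hypothetical placeholder, the real model_name is not shown).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments

model_name = "distilgpt2"  # assumption: the actual value lies outside the shown hunks

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    low_cpu_mem_usage=True,     # lower peak memory while loading weights
    torch_dtype=torch.float32   # ensure compatibility with CPU
)

training_args = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=1,
    num_train_epochs=1,
    no_cuda=True,               # the only remaining CPU flag after this commit
)
```

The share=True added to demo.launch() only asks Gradio to create a temporary public link for testing; it does not change the training behavior.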