import torch
import gradio as gr
import os
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)

# Force CPU mode: hide all CUDA devices and silence the bitsandbytes startup banner
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["BITSANDBYTES_NOWELCOME"] = "1"

def train():
    # With CUDA devices hidden above, device_map="auto" resolves to CPU
    model = AutoModelForCausalLM.from_pretrained(
        "microsoft/phi-2",
        device_map="auto",
        trust_remote_code=True,
        load_in_4bit=False  # Disable quantization; bitsandbytes 4-bit needs a GPU
    )
    
    training_args = TrainingArguments(
        output_dir="./results",
        per_device_train_batch_size=2,
        num_train_epochs=3,
        use_cpu=True,  # Explicit CPU usage (newer name for the older no_cuda flag)
        fp16=False,    # Keep full fp32 precision for the CPU run
        bf16=False,
    )
    
    # Rest of training code...
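
    # --- Hedged sketch of the elided steps (illustrative, not the original code) ---
    # Assumptions: a plain-text file "train.txt" in the working directory serves as
    # the training corpus, and fine-tuning uses the standard causal-LM Trainer API.
    from datasets import load_dataset  # assumed dependency for this sketch

    tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token  # phi-2 ships without a pad token

    dataset = load_dataset("text", data_files={"train": "train.txt"})["train"]

    def tokenize(batch):
        return tokenizer(batch["text"], truncation=True, max_length=512)

    tokenized = dataset.map(tokenize, batched=True, remove_columns=["text"])
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized,
        data_collator=collator,
    )
    trainer.train()
    trainer.save_model("./results")
    return "Training finished."


# Minimal Gradio wiring (assumed; gradio is imported above but never used in the
# snippet): a single button that starts the CPU training run and reports the result.
demo = gr.Interface(fn=train, inputs=None, outputs="text", title="phi-2 CPU fine-tuning")

if __name__ == "__main__":
    demo.launch()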