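"""Minimal demo: CPU-only fine-tuning of microsoft/phi-2 on wikitext-2,
triggered from a one-button Gradio UI."""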
import torch
import gradio as gr
import os
import logging
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from datasets import load_dataset

# Force CPU-only mode
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["BITSANDBYTES_NOWELCOME"] = "1"

# Configure logging
logging.basicConfig(level=logging.INFO)

def train():
    try:
        # Load model and tokenizer
        model_name = "microsoft/phi-2"
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="cpu",
            trust_remote_code=True,
            torch_dtype=torch.float32,  # full precision; no quantization on CPU
        )

        # Add padding token
        tokenizer.pad_token = tokenizer.eos_token
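        # Caveat: with pad == eos, the collator below also masks genuine EOS
        # tokens to -100 in the labels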

        # Load sample dataset
        dataset = load_dataset("wikitext", "wikitext-2-raw-v1")
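        # Optional (assumption: a quick smoke test is the goal): subsample to
        # keep a CPU run tractable, e.g.
        # dataset["train"] = dataset["train"].select(range(200))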

        # Tokenization function (no return_tensors here: datasets.map stores
        # plain lists; the collator builds the per-batch tensors)
        def tokenize_function(examples):
            return tokenizer(
                examples["text"],
                padding="max_length",
                truncation=True,
                max_length=256,
            )

        tokenized_dataset = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["text"]
        )

        # Collator for causal LM: mlm=False copies input_ids into labels and
        # masks pad positions to -100 (the model shifts labels internally)
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer,
            mlm=False
        )

        # Training arguments
        training_args = TrainingArguments(
            output_dir="./results",
            per_device_train_batch_size=2,
            per_device_eval_batch_size=2,
            num_train_epochs=1,  # Reduced for testing
            logging_dir="./logs",
            fp16=False,
            bf16=False,
            use_cpu=True  # Explicit CPU usage
        )

        # Trainer
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized_dataset["train"],
            data_collator=data_collator,
        )

        # Start training
        logging.info("Starting training...")
        trainer.train()
        logging.info("Training completed!")

        # Persist the fine-tuned weights so the success message holds true
        trainer.save_model(training_args.output_dir)
        tokenizer.save_pretrained(training_args.output_dir)

        return "✅ Training successful! Model saved."

    except Exception as e:
        logging.error(f"Error: {e}")
        return f"❌ Training failed: {e}"

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Phi-2 CPU Training")
    start_btn = gr.Button("Start Training")
    output = gr.Textbox()
    
    start_btn.click(
        fn=train,
        outputs=output
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
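
# Assumption: this file is saved as app.py. Run `python app.py` and open
# http://localhost:7860 to start a training run from the browser.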