import torch
import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
)
from datasets import load_dataset
import logging
import os

# Configure environment
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # Force CPU
logging.basicConfig(level=logging.INFO)


def train():
    try:
        # Load model and tokenizer
        model_name = "microsoft/phi-2"
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        # The phi-2 tokenizer ships without a pad token; padding="max_length"
        # below would raise without one, so reuse EOS as the pad token.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="cpu",
            trust_remote_code=True,
        )

        # Load dataset
        dataset = load_dataset("wikitext", "wikitext-2-raw-v1")

        # Tokenization: pad/truncate every example to a fixed length. Plain
        # Python lists suffice here; datasets stores map() output in Arrow
        # format, so returning PyTorch tensors from map() is unnecessary.
        def tokenize_function(examples):
            return tokenizer(
                examples["text"],
                padding="max_length",
                truncation=True,
                max_length=256,
            )

        tokenized_dataset = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["text"],
        )

        # Training setup: mlm=False produces causal-LM labels (input_ids
        # copied to labels, padding masked out) rather than masked-LM labels.
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer,
            mlm=False,
        )

        training_args = TrainingArguments(
            output_dir="./results",
            per_device_train_batch_size=2,
            num_train_epochs=1,
            logging_dir="./logs",
            fp16=False,  # fp16 is not supported on CPU
            report_to="none",
        )

        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized_dataset["train"],
            data_collator=data_collator,
        )

        # Start training
        logging.info("Training started...")
        trainer.train()
        logging.info("Training completed!")
        return "✅ Training successful"

    except Exception as e:
        logging.error(f"Error: {str(e)}")
        return f"❌ Training failed: {str(e)}"


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Phi-2 CPU Training")
    start_btn = gr.Button("Start Training")
    output = gr.Textbox()
    start_btn.click(
        fn=train,
        outputs=output,
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
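
# --- Optional CPU smoke test (a sketch under assumptions, not part of the
# app above). A full epoch over wikitext-2 with a 2.7B-parameter model on CPU
# can take many hours, so for a quick sanity check you could cap the run
# inside train() instead. `max_steps=10` and the 200-row slice below are
# arbitrary illustrative choices; `max_steps` and Dataset.select() are
# standard transformers/datasets APIs.
#
#   training_args = TrainingArguments(
#       output_dir="./results",
#       per_device_train_batch_size=2,
#       max_steps=10,  # stop after 10 optimizer steps instead of a full epoch
#       logging_dir="./logs",
#       fp16=False,
#       report_to="none",
#   )
#   trainer = Trainer(
#       model=model,
#       args=training_args,
#       train_dataset=tokenized_dataset["train"].select(range(200)),  # small slice
#       data_collator=data_collator,
#   )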