# -*- coding: utf-8 -*-
"""Gemma3_(4B).ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma3_(4B).ipynb

To run this, press "*Runtime*" and press "*Run all*" on a **free** Tesla T4 Google Colab instance!

Join Discord if you need help + ⭐ Star us on Github

To install Unsloth on your own computer, follow the installation instructions on our Github page [here](https://docs.unsloth.ai/get-started/installing-+-updating).

You will learn how to do [data prep](#Data), how to [train](#Train), how to [run the model](#Inference), & [how to save it](#Save)

### News

**Read our [Gemma 3 blog](https://unsloth.ai/blog/gemma3) for what's new in Unsloth and our [Reasoning blog](https://unsloth.ai/blog/r1-reasoning) on how to train reasoning models.**

Visit our docs for all our [model uploads](https://docs.unsloth.ai/get-started/all-our-models) and [notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks).

### Installation
"""

# Commented out IPython magic to ensure Python compatibility.
# %%capture
# import os
# if "COLAB_" not in "".join(os.environ.keys()):
#     !pip install unsloth
# else:
#     # Do this only in Colab notebooks! Otherwise use pip install unsloth
#     !pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft trl triton cut_cross_entropy unsloth_zoo
#     !pip install sentencepiece protobuf datasets huggingface_hub hf_transfer
#     !pip install --no-deps unsloth
#     # Install latest Hugging Face for Gemma-3!
#     !pip install --no-deps git+https://github.com/huggingface/transformers@v4.49.0-Gemma-3

"""### Unsloth

`FastModel` supports loading nearly any model now! This includes Vision and Text models!
"""

from unsloth import FastModel
import torch

fourbit_models = [
    # 4bit dynamic quants for superior accuracy and low memory use
    "unsloth/gemma-3-1b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-4b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-12b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-27b-it-unsloth-bnb-4bit",

    # Other popular models!
    "unsloth/Llama-3.1-8B",
    "unsloth/Llama-3.2-3B",
    "unsloth/Llama-3.3-70B",
    "unsloth/mistral-7b-instruct-v0.3",
    "unsloth/Phi-4",
] # More models at https://huggingface.co/unsloth

model, tokenizer = FastModel.from_pretrained(
    model_name = "unsloth/gemma-3-4b-it",
    max_seq_length = 8192, # Choose any for long context!
    load_in_4bit = False,  # 4 bit quantization to reduce memory
    load_in_8bit = False,  # [NEW!] A bit more accurate, uses 2x memory
    full_finetuning = False, # [NEW!] Full finetuning is supported - keep False here since we add LoRA adapters below
    # token = "hf_...", # use one if using gated models
)

"""We now add LoRA adapters so we only need to update a small amount of parameters!"""

model = FastModel.get_peft_model(
    model,
    finetune_vision_layers     = False, # Turn off for just text!
    finetune_language_layers   = True,  # Should leave on!
    finetune_attention_modules = True,  # Attention good for GRPO
    finetune_mlp_modules       = True,  # Should leave on always!

    r = 64,          # Larger = higher accuracy, but might overfit
    lora_alpha = 32, # Recommended alpha == r at least
    lora_dropout = 0.1,
    bias = "none",
    random_state = 3407,
)
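"""As a quick optional sanity check, we can count how many parameters the LoRA adapters actually train. This is a minimal sketch that only assumes `model` is a standard PyTorch module; the exact numbers depend on your `r` and on which layer groups you enabled above."""

trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
total_params = sum(p.numel() for p in model.parameters())
print(f"Trainable parameters: {trainable_params:,} / {total_params:,} "
      f"({100 * trainable_params / total_params:.2f}%)")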
""" from unsloth.chat_templates import get_chat_template tokenizer = get_chat_template( tokenizer, chat_template = "gemma-3", ) from datasets import load_dataset dataset = load_dataset("FourOhFour/RP_Phase", split = "train") """We now use `standardize_data_formats` to try converting datasets to the correct format for finetuning purposes!""" from unsloth.chat_templates import standardize_data_formats dataset = standardize_data_formats(dataset) """Let's see how row 100 looks like!""" dataset[100] """We validate and fix conversations to ensure proper role alternation""" def validate_and_fix_conversations(examples): valid_convs = [] for conv in examples["conversations"]: # Check if roles alternate properly prev_role = None # Clean up the conversation to ensure proper alternation fixed_conv = [] for turn in conv: role = turn.get("role", "").lower() # Skip if same role appears consecutively if role == prev_role: continue # Normalize roles to expected format if role in ["assistant", "bot", "chatbot"]: role = "model" elif role in ["human", "usr"]: role = "user" fixed_conv.append({"role": role, "content": turn.get("content", "")}) prev_role = role # Ensure it starts with user and alternates correctly if fixed_conv and fixed_conv[0]["role"] == "user": valid_convs.append(fixed_conv) return {"conversations": valid_convs} # Apply the validation and fixing step dataset = dataset.map(validate_and_fix_conversations, batched=True) """We now have to apply the chat template for `Gemma-3` onto the conversations, and save it to `text`""" def apply_chat_template(examples): texts = tokenizer.apply_chat_template(examples["conversations"]) return { "text" : texts } dataset = dataset.map(apply_chat_template, batched = True) """Let's see how the chat template did! Notice `Gemma-3` default adds a ``!""" dataset[100]["text"] """ ### Train the model Now let's use Huggingface TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). We do 60 steps to speed things up, but you can set `num_train_epochs=1` for a full run, and turn off `max_steps=None`. """ from trl import SFTTrainer, SFTConfig trainer = SFTTrainer( model = model, tokenizer = tokenizer, train_dataset = dataset, eval_dataset = None, # Can set up evaluation! args = SFTConfig( dataset_text_field = "text", per_device_train_batch_size = 2, gradient_accumulation_steps = 4, # Use GA to mimic batch size! warmup_steps = 35, num_train_epochs = 2, # Set this for 1 full training run. learning_rate = 1e-5, # Reduce to 2e-5 for long training runs logging_steps = 1, optim = "paged_adamw_8bit", weight_decay = 0.02, lr_scheduler_type = "linear", seed = 3407, report_to = "wandb", # Use this for WandB etc ), ) """We also use Unsloth's `train_on_completions` method to only train on the assistant outputs and ignore the loss on the user's inputs. This helps increase accuracy of finetunes!""" from unsloth.chat_templates import train_on_responses_only trainer = train_on_responses_only( trainer, instruction_part = "user\n", response_part = "model\n", ) """Let's verify masking the instruction part is done! 
"""Let's verify masking the instruction part is done! Let's print the 100th row again:"""

tokenizer.decode(trainer.train_dataset[100]["input_ids"])

"""Now let's print the masked out example - you should see only the answer is present:"""

tokenizer.decode(
    [tokenizer.pad_token_id if x == -100 else x for x in trainer.train_dataset[100]["labels"]]
).replace(tokenizer.pad_token, " ")

# @title Show current memory stats
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

"""Let's train the model! To resume a training run, set `trainer.train(resume_from_checkpoint = True)`."""

trainer_stats = trainer.train()

# @title Show final memory and time stats
used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
used_percentage = round(used_memory / max_memory * 100, 3)
lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
print(f"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training.")
print(f"Peak reserved memory = {used_memory} GB.")
print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
print(f"Peak reserved memory % of max memory = {used_percentage} %.")
print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")
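"""As a quick sanity check after training, we can generate from the finetuned model with the same `Gemma-3` chat template. This is only a minimal sketch - the prompt below is arbitrary and the sampling settings are the commonly recommended Gemma-3 values; see the [Inference](#Inference) and [Save](#Save) sections for the full workflow."""

messages = [
    {"role" : "user", "content" : "Hello! Introduce yourself in one sentence."},
]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt = True, # Appends the model turn header so Gemma-3 starts replying
    return_tensors = "pt",
).to("cuda")

outputs = model.generate(
    input_ids,
    max_new_tokens = 64,
    temperature = 1.0, top_p = 0.95, top_k = 64,
)
print(tokenizer.batch_decode(outputs))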