Delta-Vector committed (verified)
Commit 027f814 · Parent: 989107e

Upload gemma.py

Files changed (1): gemma.py (new file, +228 lines)
# -*- coding: utf-8 -*-
"""Gemma3_(4B).ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma3_(4B).ipynb

To run this, press "*Runtime*" and press "*Run all*" on a **free** Tesla T4 Google Colab instance!
<div class="align-center">
<a href="https://unsloth.ai/"><img src="https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png" width="115"></a>
<a href="https://discord.gg/unsloth"><img src="https://github.com/unslothai/unsloth/raw/main/images/Discord button.png" width="145"></a>
<a href="https://docs.unsloth.ai/"><img src="https://github.com/unslothai/unsloth/blob/main/images/documentation%20green%20button.png?raw=true" width="125"></a> Join Discord if you need help + ⭐ <i>Star us on <a href="https://github.com/unslothai/unsloth">Github</a></i> ⭐
</div>

To install Unsloth on your own computer, follow the installation instructions on our Github page [here](https://docs.unsloth.ai/get-started/installing-+-updating).

You will learn how to do [data prep](#Data), how to [train](#Train), how to [run the model](#Inference), & [how to save it](#Save).

### News

**Read our [Gemma 3 blog](https://unsloth.ai/blog/gemma3) for what's new in Unsloth and our [Reasoning blog](https://unsloth.ai/blog/r1-reasoning) on how to train reasoning models.**

Visit our docs for all our [model uploads](https://docs.unsloth.ai/get-started/all-our-models) and [notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks).

### Installation
"""

# Commented out IPython magic to ensure Python compatibility.
# %%capture
# import os
# if "COLAB_" not in "".join(os.environ.keys()):
#     !pip install unsloth
# else:
#     # Do this only in Colab notebooks! Otherwise use pip install unsloth
#     !pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft trl triton cut_cross_entropy unsloth_zoo
#     !pip install sentencepiece protobuf datasets huggingface_hub hf_transfer
#     !pip install --no-deps unsloth
#     # Install latest Hugging Face for Gemma-3!
#     !pip install --no-deps git+https://github.com/huggingface/[email protected]

"""### Unsloth

`FastModel` supports loading nearly any model now! This includes Vision and Text models!
"""

from unsloth import FastModel
import torch

fourbit_models = [
    # 4bit dynamic quants for superior accuracy and low memory use
    "unsloth/gemma-3-1b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-4b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-12b-it-unsloth-bnb-4bit",
    "unsloth/gemma-3-27b-it-unsloth-bnb-4bit",

    # Other popular models!
    "unsloth/Llama-3.1-8B",
    "unsloth/Llama-3.2-3B",
    "unsloth/Llama-3.3-70B",
    "unsloth/mistral-7b-instruct-v0.3",
    "unsloth/Phi-4",
] # More models at https://huggingface.co/unsloth

model, tokenizer = FastModel.from_pretrained(
    model_name = "unsloth/gemma-3-4b-it",
    max_seq_length = 8192,   # Choose any for long context!
    load_in_4bit = False,    # 4 bit quantization to reduce memory
    load_in_8bit = False,    # [NEW!] A bit more accurate, uses 2x memory
    full_finetuning = True,  # [NEW!] We have full finetuning now!
    # token = "hf_...",      # use one if using gated models
)

"""We now add LoRA adapters so we only need to update a small amount of parameters!"""

model = FastModel.get_peft_model(
    model,
    finetune_vision_layers     = False, # Turn off for just text!
    finetune_language_layers   = True,  # Should leave on!
    finetune_attention_modules = True,  # Attention good for GRPO
    finetune_mlp_modules       = True,  # Should leave on always!

    r = 64,            # Larger = higher accuracy, but might overfit
    lora_alpha = 32,   # Recommended: alpha of at least r
    lora_dropout = 0.1,
    bias = "none",
    random_state = 3407,
)
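
"""As a quick optional check that only a small fraction of the weights will be updated, we can count trainable parameters with plain PyTorch (a minimal sketch, not required for training):"""

# Count trainable vs. total parameters; with LoRA adapters only a small fraction should require gradients
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
total_params = sum(p.numel() for p in model.parameters())
print(f"Trainable parameters: {trainable_params:,} / {total_params:,} "
      f"({100 * trainable_params / total_params:.2f}%)")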

"""<a name="Data"></a>
### Data Prep
We now use the `Gemma-3` format for conversation-style finetunes. We use the `FourOhFour/RP_Phase` dataset in ShareGPT style. Gemma-3 renders multi-turn conversations like below:

```
<bos><start_of_turn>user
Hello!<end_of_turn>
<start_of_turn>model
Hey there!<end_of_turn>
```

We use our `get_chat_template` function to get the correct chat template. We support `zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, phi3, llama3, phi4, qwen2.5, gemma3` and more.
"""

from unsloth.chat_templates import get_chat_template
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "gemma-3",
)
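
"""To confirm the template matches the format shown above, we can render a tiny made-up conversation (an optional sanity check):"""

# Render a two-turn example with the gemma-3 template; tokenize = False returns the raw string
example_messages = [
    {"role": "user",  "content": "Hello!"},
    {"role": "model", "content": "Hey there!"},
]
print(tokenizer.apply_chat_template(example_messages, tokenize = False))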

from datasets import load_dataset
dataset = load_dataset("FourOhFour/RP_Phase", split = "train")

"""We now use `standardize_data_formats` to try converting datasets to the correct format for finetuning purposes!"""

from unsloth.chat_templates import standardize_data_formats
dataset = standardize_data_formats(dataset)

"""Let's see what row 100 looks like!"""

dataset[100]

"""We validate and fix conversations to ensure proper role alternation."""

def validate_and_fix_conversations(examples):
    valid_convs = []
    for conv in examples["conversations"]:
        prev_role = None

        # Clean up the conversation to ensure proper alternation
        fixed_conv = []
        for turn in conv:
            # Normalize roles to the names the Gemma-3 template expects
            role = turn.get("role", "").lower()
            if role in ["assistant", "bot", "chatbot"]:
                role = "model"
            elif role in ["human", "usr"]:
                role = "user"

            # Skip the turn if the same role appears twice in a row
            if role == prev_role:
                continue

            fixed_conv.append({"role": role, "content": turn.get("content", "")})
            prev_role = role

        # Keep only conversations that start with a user turn
        if fixed_conv and fixed_conv[0]["role"] == "user":
            valid_convs.append(fixed_conv)

    return {"conversations": valid_convs}

# Apply the validation and fixing step
dataset = dataset.map(validate_and_fix_conversations, batched = True)
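
"""A quick optional sanity check on a made-up conversation: back-to-back turns from the same speaker should be dropped and roles normalized to `user`/`model`:"""

# Example input uses "human"/"assistant"/"bot" roles; expected output roles: user, model, user
_demo = validate_and_fix_conversations({
    "conversations": [[
        {"role": "human",     "content": "Hi!"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "bot",       "content": "Hello again!"},  # consecutive model turn, dropped
        {"role": "human",     "content": "How are you?"},
    ]]
})
print(_demo["conversations"][0])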

"""We now have to apply the chat template for `Gemma-3` onto the conversations, and save it to `text`."""

def apply_chat_template(examples):
    # tokenize = False keeps the rendered template as a string for the `text` column
    texts = tokenizer.apply_chat_template(examples["conversations"], tokenize = False)
    return { "text" : texts }

dataset = dataset.map(apply_chat_template, batched = True)

"""Let's see how the chat template did! Notice the `Gemma-3` template adds a `<bos>` by default!"""

dataset[100]["text"]

"""<a name="Train"></a>
### Train the model
Now let's use Huggingface TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). Here we train for 2 full epochs; set `max_steps` to a small number instead if you just want a quick test run.
"""

from trl import SFTTrainer, SFTConfig
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    eval_dataset = None, # Can set up evaluation!
    args = SFTConfig(
        dataset_text_field = "text",
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4, # Use GA to mimic a larger batch size!
        warmup_steps = 35,
        num_train_epochs = 2, # Number of full passes over the dataset
        learning_rate = 1e-5, # Lower learning rates are typical for full finetuning
        logging_steps = 1,
        optim = "paged_adamw_8bit",
        weight_decay = 0.02,
        lr_scheduler_type = "linear",
        seed = 3407,
        report_to = "wandb", # Log to Weights & Biases; use "none" to disable
    ),
)
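
"""The effective batch size is `per_device_train_batch_size * gradient_accumulation_steps`, so 2 * 4 = 8 sequences per optimizer step here. A one-line optional check:"""

# Effective batch size per optimizer step (per GPU)
print("Effective batch size:",
      trainer.args.per_device_train_batch_size * trainer.args.gradient_accumulation_steps)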

"""We also use Unsloth's `train_on_responses_only` method to only train on the assistant outputs and ignore the loss on the user's inputs. This helps increase the accuracy of finetunes!"""

from unsloth.chat_templates import train_on_responses_only
trainer = train_on_responses_only(
    trainer,
    instruction_part = "<start_of_turn>user\n",
    response_part = "<start_of_turn>model\n",
)

"""Let's verify masking the instruction part is done! Let's print the 100th row again:"""

tokenizer.decode(trainer.train_dataset[100]["input_ids"])

"""Now let's print the masked out example - you should see only the answer is present:"""

tokenizer.decode([tokenizer.pad_token_id if x == -100 else x for x in trainer.train_dataset[100]["labels"]]).replace(tokenizer.pad_token, " ")

# @title Show current memory stats
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

"""Let's train the model! To resume a training run, set `trainer.train(resume_from_checkpoint = True)`."""

trainer_stats = trainer.train()

# @title Show final memory and time stats
used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
used_percentage = round(used_memory / max_memory * 100, 3)
lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
print(
    f"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training."
)
print(f"Peak reserved memory = {used_memory} GB.")
print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
print(f"Peak reserved memory % of max memory = {used_percentage} %.")
print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")