Delta-Vector committed
Commit 76eb77d · verified · 1 Parent(s): 93b5d63

Upload script(1).py

Files changed (1)
  1. script(1).py +208 -0
script(1).py ADDED
@@ -0,0 +1,208 @@
+from unsloth import FastModel
+import torch
+import json
+
+# Model setup
+model, tokenizer = FastModel.from_pretrained(
+    model_name = "NewEden/Gemma-Merged-V2",
+    max_seq_length = 8192,
+    load_in_4bit = False,
+    load_in_8bit = False,
+    full_finetuning = False,
+)
+
+# Add LoRA adapters
+model = FastModel.get_peft_model(
+    model,
+    finetune_vision_layers=False,
+    finetune_language_layers=True,
+    finetune_attention_modules=True,
+    finetune_mlp_modules=True,
+    target_modules=[
+        "q_proj", "k_proj", "v_proj", "o_proj",
+        "gate_proj", "up_proj", "down_proj",
+    ],
+    r=64,
+    lora_alpha=32,
+    lora_dropout=0.1,
+    bias="none",
+    random_state=3407,
+)
+
+# Set up chat template
+from unsloth.chat_templates import get_chat_template
+tokenizer = get_chat_template(
+    tokenizer,
+    chat_template="gemma-3",
+)
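+
+# Optional sanity check (an illustrative sketch added here, not part of the
+# original flow): render one made-up two-turn exchange through the gemma-3
+# template to confirm the <start_of_turn> markers relied on later appear.
+_demo = [
+    {"role": "user", "content": "Hello!"},
+    {"role": "model", "content": "Hi! How can I help?"},
+]
+print(tokenizer.apply_chat_template(_demo, tokenize=False))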
+
+# Load dataset
+from datasets import load_dataset, Dataset, Features, Sequence, Value
+print("Loading dataset...")
+dataset = load_dataset(
+    "NewEden/Light-Novels-Roleplay-Logs-Books-Oh-My",
+    split="train",
+)
+print(f"Dataset loaded with {len(dataset)} examples.")
+
+# Clean + fix: drop malformed turns, normalize role aliases onto user/model,
+# strip system turns, and rebuild each conversation as a strictly
+# alternating user/model sequence that ends on a model turn.
+def validate_and_fix_conversations(examples):
+    fixed = []
+    for conv in examples["conversations"]:
+        if not isinstance(conv, list):
+            continue
+        cleaned = []
+        for turn in conv:
+            if not isinstance(turn, dict):
+                continue
+            role = turn.get("role", "").lower()
+            content = turn.get("content", "")
+            if not isinstance(content, str) or not content.strip():
+                continue
+            if role == "system":
+                continue
+            if role in ["assistant", "bot", "chatbot"]:
+                role = "model"
+            elif role in ["human", "usr", "user"]:
+                role = "user"
+            else:
+                continue
+            cleaned.append({"role": role, "content": content})
+
+        if len(cleaned) < 2:
+            continue
+
+        # Drop a leading model turn so the conversation opens with the user.
+        if cleaned[0]["role"] != "user":
+            cleaned = cleaned[1:]
+
+        # Keep only turns that continue the user/model alternation.
+        fixed_conv = []
+        expected = "user"
+        for turn in cleaned:
+            if turn["role"] == expected:
+                fixed_conv.append(turn)
+                expected = "model" if expected == "user" else "user"
+
+        # A trailing user turn has no reply to learn from; drop it.
+        if fixed_conv and fixed_conv[-1]["role"] == "user":
+            fixed_conv = fixed_conv[:-1]
+
+        if len(fixed_conv) >= 2:
+            fixed.append(fixed_conv)
+
+    return {"conversations": fixed}
+
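+# Quick illustration (made-up input, safe to delete): the cleaner maps the
+# human/bot aliases onto user/model and discards the system turn.
+_check = validate_and_fix_conversations({"conversations": [[
+    {"role": "system", "content": "persona"},
+    {"role": "human", "content": "hi"},
+    {"role": "bot", "content": "hey"},
+]]})
+assert _check["conversations"][0][0]["role"] == "user"
+assert _check["conversations"][0][1]["role"] == "model"
+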
+print("Validating and fixing conversations...")
+dataset = dataset.map(
+    validate_and_fix_conversations,
+    batched=True,
+    desc="Fixing conversations",
+)
+print(f"Validation complete. {len(dataset)} examples left.")
+
+# Fallback dummy
+if len(dataset) == 0:
+    print("Dataset empty after validation. Creating dummy data...")
+    # Keep the same role/content layout the rest of the pipeline expects,
+    # so the alternation filter and chat template below still work. A plain
+    # list feature (rather than Sequence) preserves the list-of-dicts shape.
+    dummy_conversations = [
+        [
+            {"role": "user", "content": "Hey, what's up?"},
+            {"role": "model", "content": "All good! How can I help?"},
+        ]
+    ]
+    flat_examples = [{"conversations": conv} for conv in dummy_conversations]
+    features = Features({"conversations": [{"role": Value("string"), "content": Value("string")}]})
+    dataset = Dataset.from_list(flat_examples, features=features)
+    print(f"Dummy dataset created with {len(dataset)} example.")
+
+# Enforce strict alternation
+def enforce_strict_user_model_pairs(examples):
+    fixed = []
+    for convo in examples["conversations"]:
+        # Skip non-lists and empty lists (an empty convo would make the
+        # convo[0] check below raise an IndexError).
+        if not isinstance(convo, list) or not convo:
+            continue
+        last = None
+        valid = True
+        for turn in convo:
+            if turn["role"] == last:
+                valid = False
+                break
+            last = turn["role"]
+        if valid and convo[0]["role"] == "user" and convo[-1]["role"] == "model":
+            fixed.append(convo)
+    return {"conversations": fixed}
+
+print("Enforcing strict user/model alternation...")
+dataset = dataset.map(
+    enforce_strict_user_model_pairs,
+    batched=True,
+    desc="Filtering strict alternation",
+)
+print(f"After enforcing alternation: {len(dataset)} examples left.")
+
+# Apply chat template
+def apply_chat_template(examples):
+    # tokenize=False keeps the output as strings; without it the tokenizer
+    # returns token ids and the "text" column below would not be text.
+    texts = tokenizer.apply_chat_template(examples["conversations"], tokenize=False)
+    return {"text": texts}
+
+print("Applying chat template...")
+dataset = dataset.map(
+    apply_chat_template,
+    batched=True,
+    desc="Applying chat template",
+)
+print(f"Chat template applied. {len(dataset)} examples ready.")
+print("Sample text after templating:")
+print(dataset[0]["text"][:500] + "...")
+
+# Training
+from trl import SFTTrainer, SFTConfig
+trainer = SFTTrainer(
+    model=model,
+    tokenizer=tokenizer,
+    train_dataset=dataset,
+    eval_dataset=None,
+    args=SFTConfig(
+        dataset_text_field="text",
+        per_device_train_batch_size=1,
+        gradient_accumulation_steps=2,
+        warmup_steps=35,
+        num_train_epochs=4,
+        learning_rate=1e-5,
+        logging_steps=1,
+        optim="paged_adamw_8bit",
+        weight_decay=0.02,
+        lr_scheduler_type="linear",
+        seed=3407,
+        report_to="wandb",
+    ),
+)
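+
+# Note on the config (added comment): per_device_train_batch_size=1 with
+# gradient_accumulation_steps=2 gives an effective batch size of 2 per GPU.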
+
+from unsloth.chat_templates import train_on_responses_only
+print("Setting up response-only training...")
+trainer = train_on_responses_only(
+    trainer,
+    instruction_part="<start_of_turn>user\n",
+    response_part="<start_of_turn>model\n",
+)
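+
+# Optional check (an illustrative sketch, not in the original script): decode
+# the first example's labels to confirm only model turns remain unmasked.
+# Assumes train_on_responses_only has already written "labels" with -100 on
+# masked positions, which is how Unsloth's helper behaves at the time of
+# writing; verify against your installed version.
+_labels = trainer.train_dataset[0]["labels"]
+_pad = tokenizer.pad_token_id
+print(tokenizer.decode([_pad if t == -100 else t for t in _labels])[:300])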
+
+gpu_stats = torch.cuda.get_device_properties(0)
+start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
+print(f"GPU = {gpu_stats.name} ({max_memory} GB total)")
+print(f"Starting reserved memory = {start_gpu_memory} GB")
+
+print("Starting training...")
+trainer_stats = trainer.train()
+
+used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
+used_for_lora = round(used_memory - start_gpu_memory, 3)
+print(f"Training took {trainer_stats.metrics['train_runtime']} seconds "
+      f"({round(trainer_stats.metrics['train_runtime']/60, 2)} minutes).")
+print(f"Peak memory: {used_memory} GB. Used for LoRA: {used_for_lora} GB.")
+
+output_dir = "./gemma-finetuned"
+model.save_pretrained(output_dir)
+tokenizer.save_pretrained(output_dir)
+print(f"Model saved at {output_dir}")