antonvo committed
Commit dc76fbd · 1 Parent(s): 6ca87ba

training script

Files changed (1)
  1. train.py +103 -0
train.py ADDED
@@ -0,0 +1,103 @@
+ import torch
+ from accelerate import Accelerator
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments
+ from peft import LoraConfig
+ from trl import is_xpu_available
+
+ # sft.py script version 0.7.10
+
+ # Put the whole model on this process's device (Intel XPU or CUDA GPU),
+ # so each accelerate process holds its own replica.
+ device_map = (
+     {"": f"xpu:{Accelerator().local_process_index}"}
+     if is_xpu_available()
+     else {"": Accelerator().local_process_index}
+ )
+
+ torch_dtype = torch.bfloat16
+
+ # Quantize the base model to 4-bit (bitsandbytes) so it fits in memory
+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+ )
+
+ training_args = TrainingArguments(
+     output_dir='./output1',
+     per_device_train_batch_size=2,
+     gradient_accumulation_steps=1,
+     learning_rate=2e-4,
+     logging_steps=1,
+     num_train_epochs=3,
+     max_steps=-1,
+     # report_to='wandb',
+     save_steps=200_000,
+     save_total_limit=10,
+     push_to_hub=False,
+     hub_model_id=None,
+     gradient_checkpointing=False,
+     gradient_checkpointing_kwargs=dict(use_reentrant=False),
+     fp16=False,
+     bf16=False,
+ )
+
+ # LoRA adapter applied to the attention projections only
+ peft_config = LoraConfig(
+     r=16,
+     lora_alpha=32,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj'],
+ )
+
+ model_name = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     quantization_config=quantization_config,
+     device_map=device_map,
+     trust_remote_code=False,
+     torch_dtype=torch_dtype,
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
+ tokenizer.pad_token = tokenizer.eos_token  # Mixtral defines no pad token; reuse EOS
+ tokenizer.padding_side = 'right'
+
+ ################
+
+ from datasets import load_dataset, concatenate_datasets
+
+ # Filenames of your CSV files
+ filenames = ['./select-1.csv', './select-2.csv', './select-3.csv']
+
+ # Load datasets and split each
+ split_datasets = {'train': [], 'validation': []}
+
+ for filename in filenames:
+     # Load the CSV file as a Dataset
+     dataset = load_dataset('csv', data_files=filename, split='train')
+
+     # Split the dataset into training and validation
+     split = dataset.train_test_split(test_size=0.2, seed=42)
+
+     # Append the split datasets to the corresponding lists
+     split_datasets['train'].append(split['train'])
+     split_datasets['validation'].append(split['test'])
+
+ # Concatenate the datasets for training and validation
+ train_dataset = concatenate_datasets(split_datasets['train'])
+ eval_dataset = concatenate_datasets(split_datasets['validation'])
+
+ #################
+
+ from trl import SFTTrainer
+
+ trainer = SFTTrainer(
+     model=model,
+     args=training_args,
+     max_seq_length=512,  # 32 * 1024
+     train_dataset=train_dataset,
+     eval_dataset=eval_dataset,
+     dataset_text_field='text',
+     peft_config=peft_config,
+     tokenizer=tokenizer,
+ )
+
+ trainer.train()
+ trainer.save_model('./output1')