Saboorhsn committed
Commit f5210ab · verified · 1 Parent(s): 2e965db

Create train.py

Files changed (1)
  1. train.py +65 -0
train.py ADDED
@@ -0,0 +1,65 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from transformers import TrainingArguments, Trainer, DataCollatorForLanguageModeling
+ from datasets import load_dataset, concatenate_datasets
+
+ # Load the Llama 3 8B base model and its tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
+ model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
+ # Llama 3 ships without a padding token; reuse EOS so batches can be collated
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Load datasets (the train split of each)
+ python_codes_dataset = load_dataset('flytech/python-codes-25k', split='train')
+ streamlit_issues_dataset = load_dataset("andfanilo/streamlit-issues", split="train")
+ streamlit_docs_dataset = load_dataset("sai-lohith/streamlit_docs", split="train")
+
+ # Combine datasets: keep only the shared "text" column so they can be concatenated
+ def keep_text_column(dataset):
+     return dataset.remove_columns([c for c in dataset.column_names if c != "text"])
+
+ combined_dataset = concatenate_datasets(
+     [keep_text_column(d) for d in (python_codes_dataset, streamlit_issues_dataset, streamlit_docs_dataset)]
+ )
+
+ # Define training arguments
+ training_args = TrainingArguments(
+     per_device_train_batch_size=2,
+     num_train_epochs=3,
+     logging_dir='./logs',
+     output_dir='./output',
+     overwrite_output_dir=True,
+     report_to="none"  # Disable logging integrations to avoid cluttering output
+ )
+
+ # Define preprocessing functions
+ def tokenize_function(examples):
+     return tokenizer(examples["text"])
+
+ # Chunk length used when grouping texts; Llama 3's tokenizer reports an effectively
+ # unbounded model_max_length, so a fixed block size is used instead (tune to your GPU)
+ block_size = 1024
+
+ def group_texts(examples):
+     # Concatenate all tokenized texts.
+     concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
+     total_length = len(concatenated_examples[list(examples.keys())[0]])
+     # Drop the small remainder; padding could be used instead of dropping it. Customize as needed.
+     total_length = (total_length // block_size) * block_size
+     # Split into chunks of block_size.
+     result = {
+         k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
+         for k, t in concatenated_examples.items()
+     }
+     return result
+
+ # Tokenize the combined dataset, dropping the raw text column so only token ids remain
+ tokenized_datasets = combined_dataset.map(
+     tokenize_function,
+     batched=True,
+     num_proc=4,
+     remove_columns=["text"],
+ )
+
+ # Group texts into chunks of block_size
+ tokenized_datasets = tokenized_datasets.map(
+     group_texts,
+     batched=True,
+     num_proc=4,
+ )
+
+ # Train the model; the collator builds causal-LM labels from the input ids
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+ trainer = Trainer(
+     model=model,
+     args=training_args,
+     train_dataset=tokenized_datasets,
+     tokenizer=tokenizer,
+     data_collator=data_collator,
+ )
+
+ trainer.train()
+
+ # Save the trained model
+ trainer.save_model("PyStreamlitGPT")
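
For reference, a minimal sketch of loading the saved checkpoint for inference. It assumes the script above ran to completion and wrote the model and tokenizer to the PyStreamlitGPT directory; the prompt and generation settings are only illustrative.

    from transformers import AutoTokenizer, AutoModelForCausalLM

    # Load the fine-tuned checkpoint saved by trainer.save_model("PyStreamlitGPT")
    tokenizer = AutoTokenizer.from_pretrained("PyStreamlitGPT")
    model = AutoModelForCausalLM.from_pretrained("PyStreamlitGPT")

    # Hypothetical prompt, just to exercise the model
    prompt = "Write a Streamlit app that displays a pandas dataframe."
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))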