Upload folder using huggingface_hub
- autotrain_llm.py +99 -0
- finetune.py +66 -0
- requirements.txt +4 -0
- script.py +61 -0
autotrain_llm.py
ADDED
@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
"""AutoTrain_LLM.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/colabs/AutoTrain_LLM.ipynb
"""

#@title 🤗 AutoTrain LLM
#@markdown In order to use this colab
#@markdown - upload train.csv to a folder named `data/`
#@markdown - train.csv must contain a `text` column
#@markdown - choose a project name if you wish
#@markdown - change the model if you wish; you can use most text-generation models from the Hugging Face Hub
#@markdown - add your Hugging Face token if you wish to push the trained model to the Hugging Face Hub
#@markdown - update hyperparameters if you wish
#@markdown - click `Runtime > Run all` or run each cell individually
#@markdown - report issues / feature requests here: https://github.com/huggingface/autotrain-advanced/issues

import os

!pip install -U autotrain-advanced > install_logs.txt
!autotrain setup --colab > setup_logs.txt

#@markdown ---
#@markdown #### Project Config
#@markdown Note: if you are using a restricted/private model, you need to enter your Hugging Face token in the next step.
project_name = 'my-autotrain-llm' # @param {type:"string"}
model_name = 'abhishek/llama-2-7b-hf-small-shards' # @param {type:"string"}

#@markdown ---
#@markdown #### Push to Hub?
#@markdown Use these only if you want to push your trained model to a private repo in your Hugging Face account.
#@markdown If you don't use these, the model will be saved in Google Colab and you will need to download it manually.
#@markdown Please enter your Hugging Face write token. The trained model will be saved to your Hugging Face account.
#@markdown You can find your token here: https://huggingface.co/settings/tokens
push_to_hub = False # @param ["False", "True"] {type:"raw"}
hf_token = "hf_XXX" #@param {type:"string"}
hf_username = "abc" #@param {type:"string"}

#@markdown ---
#@markdown #### Hyperparameters
learning_rate = 2e-4 # @param {type:"number"}
num_epochs = 1 #@param {type:"number"}
batch_size = 1 # @param {type:"slider", min:1, max:32, step:1}
block_size = 1024 # @param {type:"number"}
trainer = "sft" # @param ["default", "sft", "orpo"] {type:"raw"}
warmup_ratio = 0.1 # @param {type:"number"}
weight_decay = 0.01 # @param {type:"number"}
gradient_accumulation = 4 # @param {type:"number"}
mixed_precision = "fp16" # @param ["fp16", "bf16", "none"] {type:"raw"}
peft = True # @param ["False", "True"] {type:"raw"}
quantization = "int4" # @param ["int4", "int8", "none"] {type:"raw"}
lora_r = 16 #@param {type:"number"}
lora_alpha = 32 #@param {type:"number"}
lora_dropout = 0.05 #@param {type:"number"}

# Export the config as environment variables so the autotrain CLI call below can read them.
os.environ["PROJECT_NAME"] = project_name
os.environ["MODEL_NAME"] = model_name
os.environ["PUSH_TO_HUB"] = str(push_to_hub)
os.environ["HF_TOKEN"] = hf_token
os.environ["LEARNING_RATE"] = str(learning_rate)
os.environ["NUM_EPOCHS"] = str(num_epochs)
os.environ["BATCH_SIZE"] = str(batch_size)
os.environ["BLOCK_SIZE"] = str(block_size)
os.environ["WARMUP_RATIO"] = str(warmup_ratio)
os.environ["WEIGHT_DECAY"] = str(weight_decay)
os.environ["GRADIENT_ACCUMULATION"] = str(gradient_accumulation)
os.environ["MIXED_PRECISION"] = str(mixed_precision)
os.environ["PEFT"] = str(peft)
os.environ["QUANTIZATION"] = str(quantization)
os.environ["LORA_R"] = str(lora_r)
os.environ["LORA_ALPHA"] = str(lora_alpha)
os.environ["LORA_DROPOUT"] = str(lora_dropout)
os.environ["HF_USERNAME"] = hf_username
os.environ["TRAINER"] = trainer

# Launch training; --peft and --push-to-hub are only appended when enabled above.
!autotrain llm \
    --train \
    --model ${MODEL_NAME} \
    --project-name ${PROJECT_NAME} \
    --data-path data/ \
    --text-column text \
    --lr ${LEARNING_RATE} \
    --batch-size ${BATCH_SIZE} \
    --epochs ${NUM_EPOCHS} \
    --block-size ${BLOCK_SIZE} \
    --warmup-ratio ${WARMUP_RATIO} \
    --lora-r ${LORA_R} \
    --lora-alpha ${LORA_ALPHA} \
    --lora-dropout ${LORA_DROPOUT} \
    --weight-decay ${WEIGHT_DECAY} \
    --gradient-accumulation ${GRADIENT_ACCUMULATION} \
    --quantization ${QUANTIZATION} \
    --mixed-precision ${MIXED_PRECISION} \
    --username ${HF_USERNAME} \
    --trainer ${TRAINER} \
    $( [[ "$PEFT" == "True" ]] && echo "--peft" ) \
    $( [[ "$PUSH_TO_HUB" == "True" ]] && echo "--push-to-hub --token ${HF_TOKEN}" )
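The notebook above only requires a `data/train.csv` file with a `text` column. A minimal sketch of what preparing that file could look like is below; the example rows and the instruction/response prompt formatting are purely illustrative and not part of this upload.

# Hypothetical example only: build a tiny data/train.csv with the required `text` column.
import os
import pandas as pd

os.makedirs("data", exist_ok=True)
examples = [
    "### Instruction: Summarize the review. ### Response: Great food, slow service.",
    "### Instruction: Summarize the review. ### Response: Terrible experience, would not return.",
]
pd.DataFrame({"text": examples}).to_csv("data/train.csv", index=False)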
finetune.py
ADDED
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""Finetune.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1b_AA5GHhblSKrQymYs_uYYDEqvqklfrV
"""

!pip install datasets transformers[torch]

!pip install evaluate

!pip install accelerate -U

from datasets import load_dataset

# Load the Yelp review dataset and peek at one training example.
dataset = load_dataset("yelp_review_full")
dataset["train"][100]

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")


def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)


tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Work on small subsets (1000 examples each) to keep training fast.
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))

from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)

from transformers import TrainingArguments

training_args = TrainingArguments(output_dir="test_trainer")

import numpy as np
import evaluate

metric = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)


from transformers import TrainingArguments, Trainer

# Redefine the arguments so evaluation also runs at the end of every epoch.
training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics,
)

trainer.train()
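Once `trainer.train()` finishes, a natural next step is to check accuracy on the held-out subset and try the classifier on a new review. The following is a minimal sketch under the assumption that the objects above are still in memory; the sample review text is made up, and yelp_review_full labels are the star ratings 1-5 encoded as 0-4.

import torch

# Accuracy on the small evaluation split, computed via compute_metrics above.
print(trainer.evaluate())

# Score a single (made-up) review with the fine-tuned model.
inputs = tokenizer("The pasta was excellent but the wait was far too long.",
                   return_tensors="pt", truncation=True).to(model.device)
with torch.no_grad():
    logits = model(**inputs).logits
print("predicted label (0-4):", logits.argmax(dim=-1).item())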
requirements.txt
ADDED
@@ -0,0 +1,4 @@
datasets
transformers[torch]
evaluate
accelerate
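These are the dependencies used by finetune.py and script.py; they can be installed in one step with `pip install -r requirements.txt`.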
script.py
ADDED
@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
"""Finetune.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1b_AA5GHhblSKrQymYs_uYYDEqvqklfrV
"""

from datasets import load_dataset

# Load the Yelp review dataset and peek at one training example.
dataset = load_dataset("yelp_review_full")
dataset["train"][100]

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")


def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)


tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Smaller subsets than in finetune.py: 100 examples each for a quick run.
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(100))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(100))

from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)

from transformers import TrainingArguments

training_args = TrainingArguments(output_dir="test_trainer")

import numpy as np
import evaluate

metric = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)


from transformers import TrainingArguments, Trainer

# Redefine the arguments so evaluation also runs at the end of every epoch.
training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics,
)

trainer.train()

# Push the fine-tuned model to the Hugging Face Hub (requires prior authentication).
trainer.push_to_hub()
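The final `trainer.push_to_hub()` call only succeeds if the environment is already authenticated with a Hugging Face write token. A minimal sketch of that setup is below; the token value is a placeholder, and `hub_model_id` is an optional repo name you would choose yourself.

from huggingface_hub import login

# Placeholder token: substitute a real write token from https://huggingface.co/settings/tokens
login(token="hf_XXX")

# Optionally control the target repo via TrainingArguments, e.g.:
# training_args = TrainingArguments(output_dir="test_trainer",
#                                   evaluation_strategy="epoch",
#                                   push_to_hub=True,
#                                   hub_model_id="my-yelp-bert")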