repo_id (stringlengths 15-89) | file_path (stringlengths 27-180) | content (stringlengths 1-2.23M) | __index_level_0__ (int64, 0-0) |
---|---|---|---|
hf_public_repos/trl | hf_public_repos/trl/benchmark/post_github_comment.sbatch | #!/bin/bash
#SBATCH --job-name=trl
#SBATCH --partition=production-cluster
#SBATCH --ntasks=1
#SBATCH --output=slurm/logs/%x_%j.out
sleep 2m
bash $BENCHMARK_PLOT_SCRIPT
srun python benchmark/post_github_comment.py
| 0 |
hf_public_repos/trl | hf_public_repos/trl/benchmark/plot.sh | # pip install openrlbenchmark==0.2.1a5
# see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation
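# Best-effort reading of the filter query string used below (see the openrlbenchmark docs):
# we=wandb entity, wpn=wandb project name, xaxis=x-axis key, ceik=custom env-id key,
# cen=custom experiment-name key, metrics=metrics to plot.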
BASELINE_PR_TAG=v0.4.7-55-g110e672
BASELINE_PR_NAME=PR-662
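# Note: the tag above looks like `git describe --tags` output (55 commits after v0.4.7, at commit 110e672).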
python -m openrlbenchmark.rlops_multi_metrics \
--filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
"sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
--env-ids sentiment-analysis:lvwerra/distilbert-imdb \
--no-check-empty-runs \
--pc.ncols 2 \
--pc.ncols-legend 1 \
--output-filename benchmark/trl/$BASELINE_PR_TAG/sentiment \
--scan-history
python -m openrlbenchmark.rlops_multi_metrics \
--filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
"sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
"sentiment_tuning_step_grad_accu?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb gradient accumulation ($BASELINE_PR_NAME)" \
--env-ids sentiment-analysis:lvwerra/distilbert-imdb \
--no-check-empty-runs \
--pc.ncols 2 \
--pc.ncols-legend 1 \
--output-filename benchmark/trl/$BASELINE_PR_TAG/gradient_accu \
--scan-history
python -m openrlbenchmark.rlops_multi_metrics \
--filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
"sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
"sentiment_tuning_gpt2?tag=$BASELINE_PR_TAG&cl=sentiment gpt2 ($BASELINE_PR_NAME)" \
"sentiment_tuning_falcon_rw_1b?tag=$BASELINE_PR_TAG&cl=sentiment tiiuae/falcon-rw-1b ($BASELINE_PR_NAME)" \
"sentiment_tuning_gpt2xl_grad_accu?tag=$BASELINE_PR_TAG&cl=sentiment gpt2xl ($BASELINE_PR_NAME)" \
--env-ids sentiment-analysis:lvwerra/distilbert-imdb \
--no-check-empty-runs \
--pc.ncols 2 \
--pc.ncols-legend 1 \
--output-filename benchmark/trl/$BASELINE_PR_TAG/different_models \
--scan-history
python -m openrlbenchmark.rlops_multi_metrics \
--filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \
"sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \
"sentiment_tuning_peft?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb w/ peft ($BASELINE_PR_NAME)" \
--env-ids sentiment-analysis:lvwerra/distilbert-imdb \
--no-check-empty-runs \
--pc.ncols 2 \
--pc.ncols-legend 1 \
--output-filename benchmark/trl/$BASELINE_PR_TAG/peft \
--scan-history
python benchmark/upload_benchmark.py \
--folder_path="benchmark/trl/$BASELINE_PR_TAG" \
--path_in_repo="images/benchmark/$BASELINE_PR_TAG" \
--repo_id="trl-internal-testing/example-images" \
--repo_type="dataset" | 0 |
hf_public_repos/trl | hf_public_repos/trl/benchmark/trl.slurm_template | #!/bin/bash
#SBATCH --job-name=trl
#SBATCH --partition=production-cluster
#SBATCH --gpus-per-task={{gpus_per_task}}
#SBATCH --cpus-per-gpu={{cpus_per_gpu}}
#SBATCH --ntasks={{ntasks}}
#SBATCH --output=slurm/logs/%x_%j.out
#SBATCH --array={{array}}
#SBATCH --exclude=ip-26-0-156-239,ip-26-0-148-151,ip-26-0-146-212,ip-26-0-145-137,ip-26-0-146-249,ip-26-0-146-149,ip-26-0-147-233,ip-26-0-145-154,ip-26-0-144-35,ip-26-0-144-189,ip-26-0-146-183,ip-26-0-147-120,ip-26-0-144-95,ip-26-0-145-193
{{nodes}}
seeds={{seeds}}
seed=${seeds[$SLURM_ARRAY_TASK_ID % {{len_seeds}}]}
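# Example with assumed rendered values: seeds=(1 2 3) and len_seeds=3 -> array task 4 picks seeds[4 % 3], i.e. seed 2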
echo "Running task $SLURM_ARRAY_TASK_ID with seed: $seed"
srun {{command}} --ppo_config.seed $seed
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_modeling_value_head.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
import torch
from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM
from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, create_reference_model
ALL_CAUSAL_LM_MODELS = [
"trl-internal-testing/tiny-random-CodeGenForCausalLM",
"trl-internal-testing/tiny-random-GPTJForCausalLM",
"trl-internal-testing/tiny-random-GPTNeoForCausalLM",
"trl-internal-testing/tiny-random-GPTNeoXForCausalLM",
"trl-internal-testing/tiny-random-OPTForCausalLM",
"trl-internal-testing/tiny-random-BloomForCausalLM",
"trl-internal-testing/tiny-random-GPT2LMHeadModel",
"trl-internal-testing/tiny-random-CodeGenForCausalLM-sharded",
"trl-internal-testing/tiny-random-GPTNeoXForCausalLM-safetensors-sharded",
"trl-internal-testing/tiny-random-GPTNeoXForCausalLM-safetensors"
# "trl-internal-testing/tiny-random-LlamaForCausalLM", uncomment on the next transformers release
]
ALL_SEQ2SEQ_MODELS = [
"trl-internal-testing/tiny-random-BartForConditionalGeneration",
"trl-internal-testing/tiny-random-BigBirdPegasusForConditionalGeneration",
"trl-internal-testing/tiny-random-BlenderbotForConditionalGeneration",
"trl-internal-testing/tiny-random-BlenderbotSmallForConditionalGeneration",
"trl-internal-testing/tiny-random-FSMTForConditionalGeneration",
"trl-internal-testing/tiny-random-LEDForConditionalGeneration",
"trl-internal-testing/tiny-random-LongT5ForConditionalGeneration",
"trl-internal-testing/tiny-random-M2M100ForConditionalGeneration",
"trl-internal-testing/tiny-random-MarianMTModel",
"trl-internal-testing/tiny-random-MBartForConditionalGeneration",
"trl-internal-testing/tiny-random-MT5ForConditionalGeneration",
"trl-internal-testing/tiny-random-MvpForConditionalGeneration",
"trl-internal-testing/tiny-random-PegasusForConditionalGeneration",
"trl-internal-testing/tiny-random-PegasusXForConditionalGeneration",
"trl-internal-testing/tiny-random-PLBartForConditionalGeneration",
"trl-internal-testing/tiny-random-ProphetNetForConditionalGeneration",
"trl-internal-testing/tiny-random-SwitchTransformersForConditionalGeneration",
"trl-internal-testing/tiny-random-T5ForConditionalGeneration",
]
class VHeadModelTester:
all_model_names = None
trl_model_class = None
transformers_model_class = None
def test_value_head(self):
r"""
Test if the v-head is added to the model successfully
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
self.assertTrue(hasattr(model, "v_head"))
def test_value_head_shape(self):
r"""
Test if the v-head has the correct shape
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
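            # the v_head's `summary` linear layer projects the hidden state to a single scalar value, hence output dim 1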
self.assertTrue(model.v_head.summary.weight.shape[0] == 1)
def test_value_head_init_random(self):
r"""
Test if the v-head has been randomly initialized.
        We can check that by making sure the bias is different
        from zero by default.
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
self.assertFalse(torch.allclose(model.v_head.summary.bias, torch.zeros_like(model.v_head.summary.bias)))
def test_value_head_not_str(self):
r"""
        Test if the v-head is added to the model successfully when passing an instantiated
        `PreTrainedModel` (rather than a model name string) to `from_pretrained`.
"""
for model_name in self.all_model_names:
pretrained_model = self.transformers_model_class.from_pretrained(model_name)
model = self.trl_model_class.from_pretrained(pretrained_model)
self.assertTrue(hasattr(model, "v_head"))
def test_from_save_trl(self):
"""
        Test that the model can be saved to and loaded from a directory with identical weights,
        including the additional modules (e.g. v_head).
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model_from_save = self.trl_model_class.from_pretrained(tmp_dir)
# Check if the weights are the same
for key in model_from_save.state_dict():
self.assertTrue(torch.allclose(model_from_save.state_dict()[key], model.state_dict()[key]))
def test_from_save_trl_sharded(self):
"""
        Test that the model can be saved to and loaded from a directory with identical weights - sharded case
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
with tempfile.TemporaryDirectory() as tmp_dir:
                model.save_pretrained(tmp_dir, max_shard_size="1MB")  # force sharding so the sharded path is actually exercised
model_from_save = self.trl_model_class.from_pretrained(tmp_dir)
# Check if the weights are the same
for key in model_from_save.state_dict():
self.assertTrue(torch.allclose(model_from_save.state_dict()[key], model.state_dict()[key]))
def test_from_save_transformers_sharded(self):
"""
        Test that the model can be saved and loaded using transformers with identical weights - sharded case
"""
for model_name in self.all_model_names:
transformers_model = self.trl_model_class.transformers_parent_class.from_pretrained(model_name)
trl_model = self.trl_model_class.from_pretrained(model_name)
with tempfile.TemporaryDirectory() as tmp_dir:
trl_model.save_pretrained(tmp_dir, max_shard_size="1MB")
transformers_model_from_save = self.trl_model_class.transformers_parent_class.from_pretrained(tmp_dir)
# Check if the weights are the same
for key in transformers_model.state_dict():
self.assertTrue(
torch.allclose(
transformers_model_from_save.state_dict()[key], transformers_model.state_dict()[key]
)
)
def test_from_save_transformers(self):
"""
        Test that the model can be saved and loaded using transformers with identical weights.
        In addition, check that the trl model shares its weights and modules with the transformers model (except the v_head).
"""
for model_name in self.all_model_names:
transformers_model = self.trl_model_class.transformers_parent_class.from_pretrained(model_name)
trl_model = self.trl_model_class.from_pretrained(model_name)
with tempfile.TemporaryDirectory() as tmp_dir:
trl_model.save_pretrained(tmp_dir)
transformers_model_from_save = self.trl_model_class.transformers_parent_class.from_pretrained(tmp_dir)
# Check if the weights are the same
for key in transformers_model.state_dict():
self.assertTrue(
torch.allclose(
transformers_model_from_save.state_dict()[key], transformers_model.state_dict()[key]
)
)
# Check if the trl model has the same keys as the transformers model
# except the v_head
for key in trl_model.state_dict():
if "v_head" not in key:
self.assertTrue(key in transformers_model.state_dict())
# check if the weights are the same
self.assertTrue(torch.allclose(trl_model.state_dict()[key], transformers_model.state_dict()[key]))
# check if they have the same modules
self.assertTrue(
set(transformers_model_from_save.state_dict().keys()) == set(transformers_model.state_dict().keys())
)
class CausalLMValueHeadModelTester(VHeadModelTester, unittest.TestCase):
"""
Testing suite for v-head models.
"""
all_model_names = ALL_CAUSAL_LM_MODELS
trl_model_class = AutoModelForCausalLMWithValueHead
transformers_model_class = AutoModelForCausalLM
def tearDown(self):
# free memory
gc.collect()
def test_inference(self):
r"""
Test if the model can be used for inference and outputs 3 values
- logits, loss, and value states
"""
EXPECTED_OUTPUT_SIZE = 3
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
outputs = model(input_ids)
# Check if the outputs are of the right size - here
# we always output 3 values - logits, loss, and value states
self.assertEqual(len(outputs), EXPECTED_OUTPUT_SIZE)
def test_dropout_config(self):
r"""
        Test that if we instantiate a model with `summary_dropout_prob` set in the config,
        the dropout probability is propagated to the v_head.
"""
for model_name in self.all_model_names:
pretrained_model = self.transformers_model_class.from_pretrained(model_name)
pretrained_model.config.summary_dropout_prob = 0.5
model = self.trl_model_class.from_pretrained(pretrained_model)
# Check if v head of the model has the same dropout as the config
self.assertEqual(model.v_head.dropout.p, pretrained_model.config.summary_dropout_prob)
def test_dropout_kwargs(self):
r"""
        Test that if we pass `summary_dropout_prob` as a keyword argument to `from_pretrained`,
        it is propagated to the v_head.
"""
for model_name in self.all_model_names:
v_head_kwargs = {"summary_dropout_prob": 0.5}
model = self.trl_model_class.from_pretrained(model_name, **v_head_kwargs)
# Check if v head of the model has the same dropout as the config
self.assertEqual(model.v_head.dropout.p, 0.5)
model = self.trl_model_class.from_pretrained(model_name, summary_dropout_prob=0.5)
# Check if v head of the model has the same dropout as the config
self.assertEqual(model.v_head.dropout.p, 0.5)
def test_generate(self):
r"""
Test if `generate` works for every model
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
# Just check if the generation works
_ = model.generate(input_ids)
def test_raise_error_not_causallm(self):
        # Test with a model without an LM head
model_id = "trl-internal-testing/tiny-random-GPT2Model"
# This should raise a ValueError
with self.assertRaises(ValueError):
pretrained_model = AutoModelForCausalLM.from_pretrained(model_id)
_ = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model.transformer)
def test_transformers_bf16_kwargs(self):
r"""
        Test if the transformers kwargs are correctly passed.
        Here we check that loading a model in half precision works as expected, i.e. the weights of
        the `pretrained_model` attribute are loaded in half precision and a dummy
        forward pass runs without any issue.
"""
for model_name in self.all_model_names:
trl_model = self.trl_model_class.from_pretrained(model_name, torch_dtype=torch.bfloat16)
lm_head_namings = self.trl_model_class.lm_head_namings
self.assertTrue(
any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings)
)
for lm_head_naming in lm_head_namings:
if hasattr(trl_model.pretrained_model, lm_head_naming):
self.assertTrue(getattr(trl_model.pretrained_model, lm_head_naming).weight.dtype == torch.bfloat16)
dummy_input = torch.LongTensor([[0, 1, 0, 1]])
# check dummy forward pass works in half precision
_ = trl_model(dummy_input)
@unittest.skip("This test needs to be run manually due to HF token issue.")
def test_push_to_hub(self):
for model_name in self.all_model_names:
model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name)
if "sharded" in model_name:
model.push_to_hub(model_name + "-ppo", use_auth_token=True, max_shard_size="1MB")
else:
model.push_to_hub(model_name + "-ppo", use_auth_token=True)
model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(model_name + "-ppo")
# check all keys
self.assertEqual(model.state_dict().keys(), model_from_pretrained.state_dict().keys())
for name, param in model.state_dict().items():
self.assertTrue(
torch.allclose(param, model_from_pretrained.state_dict()[name]),
f"Parameter {name} is not the same after push_to_hub and from_pretrained",
)
class Seq2SeqValueHeadModelTester(VHeadModelTester, unittest.TestCase):
"""
Testing suite for v-head models.
"""
all_model_names = ALL_SEQ2SEQ_MODELS
trl_model_class = AutoModelForSeq2SeqLMWithValueHead
transformers_model_class = AutoModelForSeq2SeqLM
def tearDown(self):
# free memory
gc.collect()
def test_inference(self):
r"""
Test if the model can be used for inference and outputs 3 values
- logits, loss, and value states
"""
EXPECTED_OUTPUT_SIZE = 3
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
decoder_input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
outputs = model(input_ids, decoder_input_ids=decoder_input_ids)
# Check if the outputs are of the right size - here
# we always output 3 values - logits, loss, and value states
self.assertEqual(len(outputs), EXPECTED_OUTPUT_SIZE)
def test_dropout_config(self):
r"""
        Test that if we instantiate a model with `summary_dropout_prob` set in the config,
        the dropout probability is propagated to the v_head.
"""
for model_name in self.all_model_names:
pretrained_model = self.transformers_model_class.from_pretrained(model_name)
pretrained_model.config.summary_dropout_prob = 0.5
model = self.trl_model_class.from_pretrained(pretrained_model)
# Check if v head of the model has the same dropout as the config
self.assertEqual(model.v_head.dropout.p, pretrained_model.config.summary_dropout_prob)
def test_dropout_kwargs(self):
r"""
        Test that if we pass `summary_dropout_prob` as a keyword argument to `from_pretrained`,
        it is propagated to the v_head.
"""
for model_name in self.all_model_names:
v_head_kwargs = {"summary_dropout_prob": 0.5}
model = self.trl_model_class.from_pretrained(model_name, **v_head_kwargs)
# Check if v head of the model has the same dropout as the config
self.assertEqual(model.v_head.dropout.p, 0.5)
model = self.trl_model_class.from_pretrained(model_name, summary_dropout_prob=0.5)
# Check if v head of the model has the same dropout as the config
self.assertEqual(model.v_head.dropout.p, 0.5)
def test_generate(self):
r"""
Test if `generate` works for every model
"""
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
decoder_input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
# Just check if the generation works
_ = model.generate(input_ids, decoder_input_ids=decoder_input_ids)
def test_raise_error_not_causallm(self):
        # Test with a model without an LM head
model_id = "trl-internal-testing/tiny-random-T5Model"
# This should raise a ValueError
with self.assertRaises(ValueError):
pretrained_model = AutoModel.from_pretrained(model_id)
_ = self.trl_model_class.from_pretrained(pretrained_model)
@unittest.skip("This test needs to be run manually due to HF token issue.")
def test_push_to_hub(self):
for model_name in self.all_model_names:
model = self.trl_model_class.from_pretrained(model_name)
if "sharded" in model_name:
model.push_to_hub(model_name + "-ppo", use_auth_token=True, max_shard_size="1MB")
else:
model.push_to_hub(model_name + "-ppo", use_auth_token=True)
model_from_pretrained = self.trl_model_class.from_pretrained(model_name + "-ppo")
# check all keys
self.assertEqual(model.state_dict().keys(), model_from_pretrained.state_dict().keys())
for name, param in model.state_dict().items():
self.assertTrue(
torch.allclose(param, model_from_pretrained.state_dict()[name]),
f"Parameter {name} is not the same after push_to_hub and from_pretrained",
)
def test_transformers_bf16_kwargs(self):
r"""
        Test if the transformers kwargs are correctly passed.
        Here we check that loading a model in half precision works as expected, i.e. the weights of
        the `pretrained_model` attribute are loaded in half precision and a dummy
        forward pass runs without any issue.
"""
for model_name in self.all_model_names:
trl_model = self.trl_model_class.from_pretrained(model_name, torch_dtype=torch.bfloat16)
lm_head_namings = self.trl_model_class.lm_head_namings
if model_name == "trl-internal-testing/tiny-random-FSMTForConditionalGeneration":
# skip the test for FSMT as it does not support mixed-prec
continue
self.assertTrue(
any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings)
)
for lm_head_naming in lm_head_namings:
if hasattr(trl_model.pretrained_model, lm_head_naming):
self.assertTrue(getattr(trl_model.pretrained_model, lm_head_naming).weight.dtype == torch.bfloat16)
dummy_input = torch.LongTensor([[0, 1, 0, 1]])
# check dummy forward pass works in half precision
_ = trl_model(input_ids=dummy_input, decoder_input_ids=dummy_input)
class ReferenceModelTest(unittest.TestCase):
def setUp(self):
self.model = AutoModelForCausalLMWithValueHead.from_pretrained(
"trl-internal-testing/tiny-random-GPT2LMHeadModel"
)
self.test_input = torch.tensor([[0, 1, 2, 3]])
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=1)
self.layer_format = "pretrained_model.transformer.h.{layer}.attn.c_attn.weight"
def test_independent_reference(self):
layer_0 = self.layer_format.format(layer=0)
layer_5 = self.layer_format.format(layer=4)
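        # note: layer index 4 is presumably the last transformer block of the tiny test model; the `layer_5` name counts layers from 1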
ref_model = create_reference_model(self.model)
first_layer_before = self.model.get_parameter(layer_0).data.clone()
last_layer_before = self.model.get_parameter(layer_5).data.clone()
first_ref_layer_before = ref_model.get_parameter(layer_0).data.clone()
last_ref_layer_before = ref_model.get_parameter(layer_5).data.clone()
output = self.model(input_ids=self.test_input, labels=self.test_input)
output[1].backward()
self.optimizer.step()
first_layer_after = self.model.get_parameter(layer_0).data.clone()
last_layer_after = self.model.get_parameter(layer_5).data.clone()
first_ref_layer_after = ref_model.get_parameter(layer_0).data.clone()
last_ref_layer_after = ref_model.get_parameter(layer_5).data.clone()
# before optimization ref and model are identical
self.assertTrue((first_layer_before == first_ref_layer_before).all())
self.assertTrue((last_layer_before == last_ref_layer_before).all())
# ref model stays identical after optimization
self.assertTrue((first_ref_layer_before == first_ref_layer_after).all())
self.assertTrue((last_ref_layer_before == last_ref_layer_after).all())
# optimized model changes
self.assertTrue(not (first_layer_before == first_layer_after).all())
self.assertTrue(not (last_layer_before == last_layer_after).all())
def test_shared_layers(self):
layer_0 = self.layer_format.format(layer=0)
layer_1 = self.layer_format.format(layer=1)
ref_model = create_reference_model(self.model, num_shared_layers=1)
first_layer_before = self.model.get_parameter(layer_0).data.clone()
second_layer_before = self.model.get_parameter(layer_1).data.clone()
first_ref_layer_before = ref_model.get_parameter(layer_0).data.clone()
second_ref_layer_before = ref_model.get_parameter(layer_1).data.clone()
output = self.model(input_ids=self.test_input, labels=self.test_input)
output[1].backward()
self.optimizer.step()
first_layer_after = self.model.get_parameter(layer_0).data.clone()
second_layer_after = self.model.get_parameter(layer_1).data.clone()
first_ref_layer_after = ref_model.get_parameter(layer_0).data.clone()
second_ref_layer_after = ref_model.get_parameter(layer_1).data.clone()
# before optimization ref and model are identical
self.assertTrue((first_layer_before == first_ref_layer_before).all())
self.assertTrue((second_layer_before == second_ref_layer_before).all())
# ref model stays identical after optimization
self.assertTrue((first_ref_layer_before == first_ref_layer_after).all())
self.assertTrue((second_ref_layer_before == second_ref_layer_after).all())
# first layer of optimized model stays the same
self.assertTrue((first_layer_before == first_layer_after).all())
# other layers in optimized model change
self.assertTrue(not (second_layer_before == second_layer_after).all())
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_best_of_n_sampler.py | import unittest
import torch
from transformers import AutoTokenizer, GenerationConfig
from trl import AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler
from trl.extras import BestOfNSampler
def queries_to_scores(list_of_strings):
return [torch.rand(1).item() for _ in list_of_strings]
class BestOfNSamplerTester(unittest.TestCase):
"""
Tests the BestOfNSampler class
"""
ref_model_name = "trl-internal-testing/dummy-GPT2-correct-vocab"
output_length_sampler = LengthSampler(2, 6)
model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)
tokenizer = AutoTokenizer.from_pretrained(ref_model_name)
tokenizer.pad_token = tokenizer.eos_token
output_length_sampler = LengthSampler(2, 6)
def test_different_input_types(self):
r"""
        Tests that the sampler handles the different supported query input formats
"""
generation_config = GenerationConfig(
min_length=-1,
top_k=0.0,
top_p=1.0,
do_sample=True,
pad_token_id=self.tokenizer.eos_token_id,
)
output_length_sampler = LengthSampler(2, 6)
best_of_n = BestOfNSampler(
self.model,
self.tokenizer,
queries_to_scores,
length_sampler=output_length_sampler,
generation_config=generation_config,
)
queries = ["hello world", "goodbye world"]
tokenized_queries = [self.tokenizer.encode(query) for query in queries]
various_queries_formats = [
(tokenized_queries[0], 1),
(tokenized_queries, 2),
(torch.tensor(tokenized_queries[1]), 1),
([torch.tensor(query) for query in tokenized_queries], 2),
]
for q, expected_length in various_queries_formats:
results = best_of_n.generate(q)
self.assertIsInstance(results, list)
assert len(results) == expected_length
def test_different_sample_sizes_and_n_candidates_values(self):
r"""
Tests different sample sizes and n_candidates values
"""
generation_config = GenerationConfig(
min_length=-1,
top_k=0.0,
top_p=1.0,
do_sample=True,
pad_token_id=self.tokenizer.eos_token_id,
)
output_length_sampler = LengthSampler(6, 10)
for sample_value, n_candidates_values, expected in [
(4, 2, 2),
(10, 3, 3),
(6, 4, 4),
]:
best_of_n = BestOfNSampler(
self.model,
self.tokenizer,
queries_to_scores,
length_sampler=output_length_sampler,
generation_config=generation_config,
sample_size=sample_value,
n_candidates=n_candidates_values,
)
queries = ["hello world", "troll the world"]
tokenized_queries = [self.tokenizer.encode(query) for query in queries]
results = best_of_n.generate(tokenized_queries)
for result in results:
assert len(result) == expected
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_environments.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
import torch
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead, TextEnvironment, TextHistory
class DummyTool:
def __call__(self, text):
return text
def dummy_generate(histories):
for i in range(len(histories)):
histories[i].append_segment("<request><DummyTool>test<call>", torch.tensor([1, 2, 3]), system=False)
return histories
class TextHistoryTest(unittest.TestCase):
def test_text_history_init(self):
text = "Hello there!"
tokens = torch.tensor([1, 2, 3])
history = TextHistory(text, tokens)
self.assertEqual(history.text, text)
self.assertTrue(torch.equal(history.tokens, tokens))
self.assertTrue(torch.equal(history.token_masks, torch.zeros_like(tokens)))
history = TextHistory(text, tokens, system=False)
self.assertTrue(torch.equal(history.token_masks, torch.ones_like(tokens)))
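        # i.e. token_masks marks model-generated (system=False) tokens with 1 and system tokens with 0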
def test_text_history_append_segment(self):
text = "Hello there!"
tokens = torch.tensor([1, 2, 3])
history = TextHistory(text, tokens)
history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]), system=False)
self.assertEqual(history.text, text + "General Kenobi!")
self.assertTrue(torch.equal(history.tokens, torch.tensor([1, 2, 3, 4, 5, 6])))
self.assertTrue(torch.equal(history.token_masks, torch.tensor([0, 0, 0, 1, 1, 1])))
history.append_segment("You are a bold one!", torch.tensor([7, 8, 9]))
self.assertEqual(history.text, text + "General Kenobi!" + "You are a bold one!")
self.assertTrue(torch.equal(history.tokens, torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9])))
self.assertTrue(torch.equal(history.token_masks, torch.tensor([0, 0, 0, 1, 1, 1, 0, 0, 0])))
def test_text_history_complete(self):
text = "Hello there!"
tokens = torch.tensor([1, 2, 3])
history = TextHistory(text, tokens)
history.complete()
self.assertTrue(history.completed)
self.assertFalse(history.truncated)
history.complete(truncated=True)
self.assertTrue(history.completed)
self.assertTrue(history.truncated)
def test_text_history_last_segment(self):
text = "Hello there!"
tokens = torch.tensor([1, 2, 3])
history = TextHistory(text, tokens)
history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]))
history.append_segment("You are a bold one!", torch.tensor([7, 8, 9]))
self.assertEqual(history.last_text_segment, "You are a bold one!")
def test_text_history_split_query_response(self):
text = "Hello there!"
tokens = torch.tensor([1, 2, 3])
history = TextHistory(text, tokens)
history.append_segment("General Kenobi!", torch.tensor([4, 5, 6]), system=False)
history.append_segment("You are a bold one!", torch.tensor([7, 8, 9]), system=True)
query, response, mask = history.split_query_response_tokens()
self.assertTrue(torch.equal(query, torch.tensor([1, 2, 3])))
self.assertTrue(torch.equal(response, torch.tensor([4, 5, 6, 7, 8, 9])))
self.assertTrue(torch.equal(mask, torch.tensor([1, 1, 1, 0, 0, 0])))
class TextEnvironmentTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
# model_id
cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
# get models and tokenizer
cls.gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(cls.model_id)
cls.gpt2_tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
cls.gpt2_tokenizer.pad_token = cls.gpt2_tokenizer.eos_token
def test_text_environment_setup(self):
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools=[DummyTool()],
reward_fn=lambda x: torch.tensor(1),
prompt="I am a prompt!\n",
)
self.assertEqual(env.prompt, "I am a prompt!\n")
self.assertEqual(list(env.tools.keys()), ["DummyTool"])
self.assertTrue(isinstance(env.tools["DummyTool"], DummyTool))
self.assertEqual(env.reward_fn("Hello there!"), 1)
def test_text_environment_generate(self):
generation_kwargs = {"do_sample": False, "max_new_tokens": 4, "pad_token_id": self.gpt2_tokenizer.eos_token_id}
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools=[DummyTool()],
reward_fn=lambda x: torch.tensor(1),
prompt="I am a prompt!\n",
generation_kwargs=generation_kwargs,
)
input_texts = ["this is a test", "this is another, longer test"]
model_inputs = [self.gpt2_tokenizer(txt, return_tensors="pt").input_ids.squeeze() for txt in input_texts]
generations_batched = env._generate_batched(model_inputs, batch_size=2)
generations_batched = self.gpt2_tokenizer.batch_decode(generations_batched)
generations_single = [env._generate_batched([inputs], batch_size=1)[0] for inputs in model_inputs]
generations_single = self.gpt2_tokenizer.batch_decode(generations_single)
self.assertEqual(generations_single, generations_batched)
def test_text_environment_tool_call_parsing(self):
string_valid = "Something something <request><Tool1>Hello there!<call>"
string_invalid_request = "Something something <Tool1>Hello there!<call>"
string_invalid_call = "Something something <request><Tool1>Hello there!"
string_invalid_tool = "Something something <request>|Tool2|Hello there!<call>"
string_invalid_random = "<>abcdefghijklm<>nopqrstuvwxyz<>"
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools=[DummyTool()],
reward_fn=lambda x: torch.tensor(1),
prompt="I am a prompt!\n",
)
tool, response = env.parse_tool_call(string_valid)
self.assertEqual(tool, "Tool1")
self.assertEqual(response, "Hello there!")
tool, response = env.parse_tool_call(string_invalid_request)
self.assertEqual(tool, None)
self.assertEqual(response, None)
tool, response = env.parse_tool_call(string_invalid_call)
self.assertEqual(tool, None)
self.assertEqual(response, None)
tool, response = env.parse_tool_call(string_invalid_tool)
self.assertEqual(tool, None)
self.assertEqual(response, None)
tool, response = env.parse_tool_call(string_invalid_random)
self.assertEqual(tool, None)
self.assertEqual(response, None)
def test_text_environment_tool_truncation(self):
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools={"dummy": lambda x: "a" * 1000},
reward_fn=lambda x: torch.tensor(1),
prompt="I am a prompt!\n",
)
env.max_tool_response = 100
history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3])))
self.assertEqual(len(history.last_text_segment) - len(env.response_token), 100)
env.max_tool_response = 500
history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3])))
self.assertEqual(len(history.last_text_segment) - len(env.response_token), 500)
env.max_tool_response = 1001
history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3])))
self.assertEqual(len(history.last_text_segment) - len(env.response_token), 1000)
env.max_tool_response = 2000
history = env.step(TextHistory("<request><dummy>Hello there!<call>", torch.tensor([1, 2, 3])))
self.assertEqual(len(history.last_text_segment) - len(env.response_token), 1000)
@patch.object(TextEnvironment, "generate", side_effect=dummy_generate)
def test_text_environment_max_calls(self, mock_generate):
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools={"DummyTool": DummyTool()},
reward_fn=lambda x: [torch.tensor(1) for _ in x],
prompt="I am a prompt!\n",
)
env.max_turns = 1
_, _, _, _, histories = env.run(["test"])
self.assertEqual(
histories[0].text, "I am a prompt!\n" + "test" + 1 * "<request><DummyTool>test<call>test<response>"
)
env.max_turns = 2
_, _, _, _, histories = env.run(["test"])
self.assertEqual(
histories[0].text, "I am a prompt!\n" + "test" + 2 * "<request><DummyTool>test<call>test<response>"
)
env.max_turns = 4
_, _, _, _, histories = env.run(["test"])
self.assertEqual(
histories[0].text, "I am a prompt!\n" + "test" + 4 * "<request><DummyTool>test<call>test<response>"
)
def test_text_environment_compute_rewards(self):
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools={"DummyTool": DummyTool()},
reward_fn=lambda x: [torch.tensor(i) for i, _ in enumerate(x)],
prompt="I am a prompt!\n",
)
histories = [TextHistory("<request><DummyTool>test<call>", torch.tensor([1, 2, 3])) for _ in range(8)]
histories = env.compute_reward(histories)
for i in range(8):
self.assertEqual(histories[i].reward, i)
@patch.object(TextEnvironment, "generate", side_effect=dummy_generate)
def test_text_environment_run(self, mock_generate):
env = TextEnvironment(
self.gpt2_model,
self.gpt2_tokenizer,
tools={"DummyTool": DummyTool()},
reward_fn=lambda x: [torch.tensor(i) for i, _ in enumerate(x)],
prompt="I am a prompt!\n",
max_turns=2,
)
task_1 = "Hello there!"
task_2 = "Hello there! General Kenobi!"
query, response, response_mask, reward, histories = env.run([task_1, task_2])
self.assertEqual(len(query[0]), 9)
self.assertEqual(len(query[1]), 12)
self.assertEqual(len(response[0]), 14)
self.assertEqual(len(response[1]), 14)
        self.assertEqual(response_mask[0].sum(), 2 * 3) # mocked generate always adds 3 tokens
        self.assertEqual(response_mask[1].sum(), 2 * 3) # mocked generate always adds 3 tokens
self.assertEqual(reward[0], 0)
self.assertEqual(reward[1], 1)
self.assertEqual(
histories[0].text, "I am a prompt!\n" + "Hello there!" + 2 * "<request><DummyTool>test<call>test<response>"
)
self.assertEqual(
histories[1].text,
"I am a prompt!\n" + "Hello there! General Kenobi!" + 2 * "<request><DummyTool>test<call>test<response>",
)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/testing_constants.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_reward_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from datasets import Dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction
from trl import RewardConfig, RewardTrainer
from trl.trainer import compute_accuracy
from .testing_utils import require_peft
class RewardTrainerTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
cls.model = AutoModelForSequenceClassification.from_pretrained(cls.model_id)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
cls.tokenizer.pad_token = cls.tokenizer.eos_token
def test_accuracy_metrics(self):
dummy_eval_predictions = EvalPrediction(torch.FloatTensor([[0.1, 0.9], [0.9, 0.1]]), torch.LongTensor([0, 0]))
accuracy = compute_accuracy(dummy_eval_predictions)
self.assertEqual(accuracy["accuracy"], 0.5)
def test_reward_trainer(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = RewardConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
)
# fmt: off
dummy_dataset_dict = {
"input_ids_chosen": [
torch.LongTensor([0, 1, 2,]),
torch.LongTensor([1, 2]),
torch.LongTensor([0, 1, 2,]),
torch.LongTensor([1, 2]),
],
"attention_mask_chosen": [
torch.LongTensor([1, 1, 1]),
torch.LongTensor([1, 0]),
torch.LongTensor([1, 1, 1]),
torch.LongTensor([1, 0]),
],
"input_ids_rejected": [
torch.LongTensor([0, 2,]),
torch.LongTensor([1, 2, 0]),
torch.LongTensor([0, 2,]),
torch.LongTensor([1, 2, 0]),
],
"attention_mask_rejected": [
torch.LongTensor([1, 1]),
torch.LongTensor([1, 1, 0]),
torch.LongTensor([1, 1]),
torch.LongTensor([1, 1, 1]),
],
}
# fmt: on
dummy_dataset = Dataset.from_dict(dummy_dataset_dict)
trainer = RewardTrainer(
model=self.model,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# check the params have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
# check the params have changed - ignore 0 biases
if param.sum() != 0:
self.assertFalse(torch.equal(param, new_param))
preds = trainer.predict(dummy_dataset)
self.assertEqual(preds.predictions.shape, (4, 2))
@require_peft
def test_reward_trainer_peft(self):
import peft
from peft import LoraConfig, TaskType
peft_version = peft.__version__
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = RewardConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=6,
remove_unused_columns=False,
gradient_accumulation_steps=2,
learning_rate=9e-1,
evaluation_strategy="steps",
)
# fmt: off
dummy_dataset_dict = {
"input_ids_chosen": [
torch.LongTensor([0, 1, 2,]),
torch.LongTensor([1, 2]),
torch.LongTensor([0, 1, 2,]),
torch.LongTensor([1, 2]),
],
"attention_mask_chosen": [
torch.LongTensor([1, 1, 1]),
torch.LongTensor([1, 0]),
torch.LongTensor([1, 1, 1]),
torch.LongTensor([1, 0]),
],
"input_ids_rejected": [
torch.LongTensor([0, 2,]),
torch.LongTensor([1, 2, 0]),
torch.LongTensor([0, 2,]),
torch.LongTensor([1, 2, 0]),
],
"attention_mask_rejected": [
torch.LongTensor([1, 1]),
torch.LongTensor([1, 1, 0]),
torch.LongTensor([1, 1]),
torch.LongTensor([1, 1, 1]),
],
}
# fmt: on
dummy_dataset = Dataset.from_dict(dummy_dataset_dict)
trainer = RewardTrainer(
model=self.model,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
peft_config=peft_config,
)
previous_trainable_params = {}
previous_non_trainable_params = {}
            # due to a change in the way the modules to save are handled in PEFT.
trainable_params_name = ["lora", "score"] if peft_version < "0.3.0" else ["lora", "modules_to_save"]
            # store the initial trainable and non-trainable params to compare after training
for n, param in trainer.model.named_parameters():
if any([t in n for t in trainable_params_name]):
previous_trainable_params[n] = param.clone()
else:
previous_non_trainable_params[n] = param.clone()
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# check the params have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
self.assertFalse(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12))
# check the non trainable params have not changed
for n, param in previous_non_trainable_params.items():
new_param = trainer.model.get_parameter(n)
self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12))
preds = trainer.predict(dummy_dataset)
self.assertEqual(preds.predictions.shape, (4, 2))
def test_reward_trainer_assert_value_error(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = RewardConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=1,
remove_unused_columns=False,
)
dummy_dataset_dict = {
# fmt: off
"input_ids_b": [
torch.LongTensor([0, 1, 2,]),
torch.LongTensor([1, 2]),
torch.LongTensor([0, 1, 2,]),
torch.LongTensor([1, 2]),
],
"attention_mask_c": [
torch.LongTensor([1, 1, 1]),
torch.LongTensor([1, 0]),
torch.LongTensor([1, 1, 1]),
torch.LongTensor([1, 0]),
],
"input_ids_f": [
torch.LongTensor([0, 2,]),
torch.LongTensor([1, 2, 0]),
torch.LongTensor([0, 2,]),
torch.LongTensor([1, 2, 0]),
],
"attention_mask_g": [
torch.LongTensor([1, 1]),
torch.LongTensor([1, 1, 0]),
torch.LongTensor([1, 1]),
torch.LongTensor([1, 1, 1]),
],
# fmt: on
}
dummy_dataset = Dataset.from_dict(dummy_dataset_dict)
trainer = RewardTrainer(
model=self.model,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
)
with self.assertRaises(ValueError):
trainer.train()
training_args = RewardConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=1,
remove_unused_columns=True,
)
with self.assertWarns(UserWarning):
trainer = RewardTrainer(
model=self.model,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
)
def test_reward_trainer_margin(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = RewardConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
)
# fmt: off
dummy_dataset_dict = {
"input_ids_chosen": [
torch.LongTensor([0, 1, 2,]),
],
"attention_mask_chosen": [
torch.LongTensor([1, 1, 1]),
],
"input_ids_rejected": [
torch.LongTensor([0, 2,]),
],
"attention_mask_rejected": [
torch.LongTensor([1, 1]),
],
"margin": [
torch.FloatTensor([1.0]),
]
}
# fmt: on
dummy_dataset = Dataset.from_dict(dummy_dataset_dict)
trainer = RewardTrainer(
model=self.model,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
)
batch = [dummy_dataset[0]]
batch = trainer.data_collator(batch)
loss, outputs = trainer.compute_loss(trainer.model, batch, return_outputs=True)
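            # expected pairwise loss with margin: -log(sigmoid(reward_chosen - reward_rejected - margin))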
self.assertAlmostEqual(
loss,
-torch.nn.functional.logsigmoid(
outputs["rewards_chosen"] - outputs["rewards_rejected"] - batch["margin"]
).mean(),
)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_no_peft.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from unittest.mock import patch
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from .testing_utils import is_peft_available, require_peft
class DummyDataset(torch.utils.data.Dataset):
def __init__(self, query_data, response_data):
self.query_data = query_data
self.response_data = response_data
def __len__(self):
return len(self.query_data)
def __getitem__(self, idx):
return self.query_data[idx], self.response_data[idx]
EXPECTED_STATS = [
"objective/kl",
"objective/kl_dist",
"objective/logprobs",
"objective/ref_logprobs",
"objective/kl_coef",
"objective/entropy",
"ppo/mean_non_score_reward",
"ppo/loss/policy",
"ppo/loss/value",
"ppo/loss/total",
"ppo/policy/entropy",
"ppo/policy/approxkl",
"ppo/policy/policykl",
"ppo/policy/clipfrac",
"ppo/policy/advantages",
"ppo/policy/advantages_mean",
"ppo/policy/ratio",
"ppo/returns/mean",
"ppo/returns/var",
"ppo/val/vpred",
"ppo/val/error",
"ppo/val/clipfrac",
"ppo/val/mean",
"ppo/val/var",
"ppo/val/var_explained",
"time/ppo/forward_pass",
"time/ppo/compute_rewards",
"time/ppo/optimize_step",
"time/ppo/calc_stats",
"time/ppo/total",
"ppo/learning_rate",
]
@require_peft
class TestPeftDependancy(unittest.TestCase):
def setUp(self):
self.causal_lm_model_id = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM"
self.seq_to_seq_model_id = "trl-internal-testing/tiny-random-T5ForConditionalGeneration"
if is_peft_available():
from peft import LoraConfig, get_peft_model
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
self.peft_model = get_peft_model(causal_lm_model, lora_config)
def test_no_peft(self):
with patch.dict(sys.modules, {"peft": None}):
from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead
            # Check that importing `peft` raises an error, since it has been patched out of sys.modules
with self.assertRaises(ModuleNotFoundError):
import peft # noqa
trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id) # noqa
trl_seq2seq_model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(self.seq_to_seq_model_id) # noqa
def test_imports_no_peft(self):
with patch.dict(sys.modules, {"peft": None}):
from trl import ( # noqa
AutoModelForCausalLMWithValueHead,
AutoModelForSeq2SeqLMWithValueHead,
PPOConfig,
PPOTrainer,
PreTrainedModelWrapper,
)
def test_ppo_trainer_no_peft(self):
with patch.dict(sys.modules, {"peft": None}):
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
ppo_model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(ppo_model_id)
tokenizer = AutoTokenizer.from_pretrained(ppo_model_id)
tokenizer.pad_token_id = tokenizer.eos_token_id
ppo_config = PPOConfig(batch_size=2, mini_batch_size=1, log_with=None)
dummy_dataset = DummyDataset(
[torch.LongTensor([0, 1, 0, 1, 0, 1]), torch.LongTensor([0, 1, 0, 1, 0, 1])],
[torch.LongTensor([1, 0, 1, 0, 1, 0]), torch.LongTensor([0, 1, 0, 1, 0, 1])],
)
ppo_trainer = PPOTrainer(
config=ppo_config,
model=trl_model,
ref_model=None,
tokenizer=tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
train_stats = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
# check gradients are not None
for _, param in trl_model.named_parameters():
if param.requires_grad:
self.assertIsNotNone(param.grad)
# check expected stats
for stat in EXPECTED_STATS:
self.assertIn(stat, train_stats)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_iterative_sft_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from datasets import Dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments
from trl import IterativeSFTTrainer
class IterativeTrainerTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
cls.tokenizer.pad_token = cls.tokenizer.eos_token
# get t5 as seq2seq example:
model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab"
cls.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
cls.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)
def _init_tensor_dummy_dataset(self):
dummy_dataset_dict = {
"input_ids": [torch.tensor([5303, 3621]), torch.tensor([3666, 1438, 318]), torch.tensor([5303, 3621])],
"attention_mask": [torch.tensor([1, 1]), torch.tensor([1, 1, 1]), torch.tensor([1, 1])],
"labels": [torch.tensor([5303, 3621]), torch.tensor([3666, 1438, 318]), torch.tensor([5303, 3621])],
}
dummy_dataset = Dataset.from_dict(dummy_dataset_dict)
dummy_dataset.set_format("torch")
return dummy_dataset
def _init_textual_dummy_dataset(self):
dummy_dataset_dict = {
"texts": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"],
"texts_labels": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"],
}
dummy_dataset = Dataset.from_dict(dummy_dataset_dict)
dummy_dataset.set_format("torch")
return dummy_dataset
def setUp(self):
# initialize trainer
self.model.train()
return super().setUp()
@parameterized.expand(
[
["gpt2", "tensor"],
["gpt2", "text"],
["t5", "tensor"],
["t5", "text"],
]
)
def test_iterative_step_from_tensor(self, model_name, input_name):
with tempfile.TemporaryDirectory() as tmp_dir:
# initialize dataset
if input_name == "tensor":
dummy_dataset = self._init_tensor_dummy_dataset()
inputs = {
"input_ids": dummy_dataset["input_ids"],
"attention_mask": dummy_dataset["attention_mask"],
"labels": dummy_dataset["labels"],
}
else:
dummy_dataset = self._init_textual_dummy_dataset()
inputs = {
"texts": dummy_dataset["texts"],
"texts_labels": dummy_dataset["texts_labels"],
}
if model_name == "gpt2":
model = self.model
tokenizer = self.tokenizer
else:
model = self.t5_model
tokenizer = self.t5_tokenizer
args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=2,
)
iterative_trainer = IterativeSFTTrainer(model=model, args=args, tokenizer=tokenizer)
iterative_trainer.step(**inputs)
for param in iterative_trainer.model.parameters():
assert param.grad is not None
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_data_collator_completion_only.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from transformers import AutoTokenizer
from trl import DataCollatorForCompletionOnlyLM
class DataCollatorForCompletionOnlyLMTester(unittest.TestCase):
def test_data_collator_finds_response_template_llama2_tokenizer(self):
# this should ideally be tested with meta-llama/Llama-2-7b-hf
self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab")
self.instruction = """### System: You are a helpful assistant.
### User: How much is 2+2?
### Assistant: 2+2 equals 4"""
self.instruction_template = "\n### User:"
self.response_template = "\n### Assistant:"
# GPT2Tokenizer: [198, 21017, 11787, 25] -> [21017, 11787, 25]
# Llama2Tokenizer: [29871, 13, 2277, 29937, 4911, 29901] -> [2277, 29937, 4911, 29901]
# Note: If this test is ever switched to Llama2Tokenizer, this should be double checked,
# and possibly switched back to [2:] instead of [1:].
# With GPT2Tokenizer, [1:] is correct - we want the 21017 token included, which is ###.
self.tokenized_instruction_w_context = self.tokenizer.encode(
self.instruction_template, add_special_tokens=False
)[1:]
# GPT2Tokenizer: [198, 21017, 15286, 25] -> [15286, 25]
# Llama2Tokenizer: [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901]
self.tokenized_response_w_context = self.tokenizer.encode(self.response_template, add_special_tokens=False)[2:]
# Plain check on string
self.assertIn(self.response_template, self.instruction)
self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False)
# Test the fix for #598
# Pass already tokenized (w context) and truncated response_template so token_ids are like in the instruction + response
self.collator = DataCollatorForCompletionOnlyLM(self.tokenized_response_w_context, tokenizer=self.tokenizer)
self.collator.torch_call([self.tokenized_instruction])
# Test for PR #749
# Pass already tokenized (w context) instruction and response both so token_ids are like in the instruction + response
self.collator = DataCollatorForCompletionOnlyLM(
self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer
)
self.collator.torch_call([self.tokenized_instruction])
# Test for PR #1185
# We pass in a string where the first user template is different than the rest.
# Usually this would happen due to context-sensitive tokenization, but here we
# explicitly change the template to test the fix.
self.instruction = """## User: First instruction
### Assistant: First response
### User: Second instruction
### Assistant: Second response"""
self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False)
self.collator = DataCollatorForCompletionOnlyLM(
self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer
)
collator_output = self.collator.torch_call([self.tokenized_instruction])
collator_text = self.tokenizer.decode(
collator_output["labels"][torch.where(collator_output["labels"] != -100)]
)
expected_text = " First response\n\n Second response" ""
self.assertEqual(collator_text, expected_text)
def test_data_collator_handling_of_long_sequences(self):
self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab")
self.instruction = """### System: You are a helpful assistant.
### User: How much is 2+2? I'm asking because I'm not sure. And I'm not sure because I'm not good at math.
"""
self.response_template = "\n### Assistant:"
# check DataCollatorForCompletionOnlyLM using response template only
self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False)
self.collator = DataCollatorForCompletionOnlyLM(self.response_template, tokenizer=self.tokenizer)
encoded_instance = self.collator.torch_call([self.tokenized_instruction])
result = torch.all(encoded_instance["labels"] == -100)
self.assertTrue(result, "Not all values in the tensor are -100.")
# check DataCollatorForCompletionOnlyLM using response template and instruction template
self.instruction_template = "\n### User:"
self.collator = DataCollatorForCompletionOnlyLM(
self.response_template, self.instruction_template, tokenizer=self.tokenizer
)
encoded_instance = self.collator.torch_call([self.tokenized_instruction])
result = torch.all(encoded_instance["labels"] == -100)
self.assertTrue(result, "Not all values in the tensor are -100.")
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_ppo_trainer.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import fnmatch
import gc
import re
import tempfile
import unittest
import pytest
import torch
from huggingface_hub import HfApi, HfFolder, delete_repo
from parameterized import parameterized
from pytest import mark
from requests.exceptions import HTTPError
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, set_seed
from trl.core import respond_to_batch
from .testing_constants import CI_HUB_ENDPOINT, CI_HUB_USER, CI_HUB_USER_TOKEN
from .testing_utils import require_peft, require_torch_multi_gpu
EXPECTED_STATS = [
"objective/kl",
"objective/kl_dist",
"objective/logprobs",
"objective/ref_logprobs",
"objective/kl_coef",
"objective/entropy",
"ppo/mean_non_score_reward",
"ppo/loss/policy",
"ppo/loss/value",
"ppo/loss/total",
"ppo/policy/entropy",
"ppo/policy/approxkl",
"ppo/policy/policykl",
"ppo/policy/clipfrac",
"ppo/policy/advantages",
"ppo/policy/advantages_mean",
"ppo/policy/ratio",
"ppo/returns/mean",
"ppo/returns/var",
"ppo/val/vpred",
"ppo/val/error",
"ppo/val/clipfrac",
"ppo/val/mean",
"ppo/val/var",
"ppo/val/var_explained",
"time/ppo/forward_pass",
"time/ppo/compute_rewards",
"time/ppo/optimize_step",
"time/ppo/calc_stats",
"time/ppo/total",
"ppo/learning_rate",
]
class DummyDataset(torch.utils.data.Dataset):
def __init__(self, query_data, response_data):
self.query_data = query_data
self.response_data = response_data
def __len__(self):
return len(self.query_data)
def __getitem__(self, idx):
return self.query_data[idx], self.response_data[idx]
def apply_mask(values, mask):
unmasked_values = []
for v, m in zip(values, mask):
if m == 1:
unmasked_values.append(v)
return torch.Tensor(unmasked_values)
def abs_diff_masked_tensors(tensor_1, tensor_2, mask_1, mask_2):
diffs = []
for l1, l2, m1, m2 in zip(tensor_1, tensor_2, mask_1, mask_2):
diff = apply_mask(l1, m1) - apply_mask(l2, m2)
diffs.append(diff.sum())
return abs(sum(diffs))
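# Illustrative example (not part of the tests): apply_mask(torch.tensor([1., 2., 3.]), torch.tensor([1, 0, 1]))
# returns tensor([1., 3.]); abs_diff_masked_tensors then sums these element-wise differences across
# all rows and returns the absolute value of the total.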
class PPOTrainerTester(unittest.TestCase):
"""
A wrapper class for testing PPOTrainer
"""
@classmethod
def setUpClass(cls):
set_seed(42)
cls._token = CI_HUB_USER_TOKEN
cls._api = HfApi(endpoint=CI_HUB_ENDPOINT)
HfFolder.save_token(CI_HUB_USER_TOKEN)
# model_id
cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
# get models and tokenizer
cls.gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(cls.model_id)
cls.gpt2_model_ref = AutoModelForCausalLMWithValueHead.from_pretrained(cls.model_id)
cls.gpt2_tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
cls.gpt2_tokenizer.pad_token = cls.gpt2_tokenizer.eos_token
# get bloom as right padding examples:
model_id = "trl-internal-testing/tiny-BloomForCausalLM-correct-vocab"
cls.bloom_model = AutoModelForCausalLMWithValueHead.from_pretrained(model_id)
cls.bloom_tokenizer = AutoTokenizer.from_pretrained(model_id)
model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab"
cls.t5_model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(model_id)
cls.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)
# initialize trainer
cls.ppo_config = PPOConfig(batch_size=2, mini_batch_size=1, log_with=None)
@classmethod
def tearDownClass(cls):
for model in [f"{CI_HUB_USER}/test-ppo-trainer"]:
try:
delete_repo(token=cls._token, repo_id=model)
except HTTPError:
pass
def setUp(self):
# initialize trainer
self.ppo_config = PPOConfig(batch_size=2, mini_batch_size=1, log_with=None)
self.gpt2_model.train()
return super().setUp()
def tearDown(self):
# free memory
gc.collect()
def _init_dummy_dataset(self):
# encode a query
query_txt = "This morning I went to the "
query_tensor = self.gpt2_tokenizer.encode(query_txt, return_tensors="pt")
assert query_tensor.shape == (1, 7)
# get model response
response_tensor = respond_to_batch(self.gpt2_model, query_tensor)
assert response_tensor.shape == (1, 20)
# create a dummy dataset
min_length = min(len(query_tensor[0]), len(response_tensor[0]))
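        # query and response are truncated to a common length (7 tokens here) so that every dummy
        # sample has matching query/response shapes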
dummy_dataset = DummyDataset(
[query_tensor[:, :min_length].squeeze(0) for _ in range(2)],
[response_tensor[:, :min_length].squeeze(0) for _ in range(2)],
)
return dummy_dataset
def test_drop_last_dataloader(self):
self.ppo_config = PPOConfig(batch_size=3, mini_batch_size=1, log_with=None)
dummy_dataset = self._init_dummy_dataset()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=self.gpt2_model_ref,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
self.assertEqual(len(dummy_dataloader), 0)
def test_ppo_step(self):
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=self.gpt2_model_ref,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
train_stats = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
for param in ppo_trainer.model.parameters():
assert param.grad is not None
for stat in EXPECTED_STATS:
assert stat in train_stats.keys()
def test_ppo_step_with_masks(self):
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=self.gpt2_model_ref,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
response_mask = [torch.ones_like(r) for r in response_tensor]
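            # an all-ones response mask should keep every response token in the loss; this mainly
            # exercises the response_mask code path of `step`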
# train model
train_stats = ppo_trainer.step(
[q for q in query_tensor], [r for r in response_tensor], reward, response_mask
)
break
for param in ppo_trainer.model.parameters():
assert param.grad is not None
for stat in EXPECTED_STATS:
assert stat in train_stats.keys()
def test_ppo_step_with_no_ref_sgd(self):
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
optimizer = torch.optim.SGD(self.gpt2_model.parameters(), lr=0.01)
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
optimizer=optimizer,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
self.assertTrue(isinstance(ppo_trainer.optimizer.optimizer, torch.optim.SGD))
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
train_stats = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
for name, param in ppo_trainer.model.named_parameters():
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
# ref model should not be trained
for name, param in ppo_trainer.ref_model.named_parameters():
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
# Finally check stats
for stat in EXPECTED_STATS:
assert stat in train_stats.keys()
def test_ppo_step_with_no_ref_sgd_lr_scheduler(self):
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
optimizer = torch.optim.SGD(self.gpt2_model.parameters(), lr=0.01)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
optimizer=optimizer,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
lr_scheduler=lr_scheduler,
)
dummy_dataloader = ppo_trainer.dataloader
self.assertTrue(isinstance(ppo_trainer.optimizer.optimizer, torch.optim.SGD))
self.assertTrue(isinstance(ppo_trainer.lr_scheduler.scheduler, torch.optim.lr_scheduler.ExponentialLR))
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
train_stats = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
for name, param in ppo_trainer.model.named_parameters():
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
# ref model should not be trained
for name, param in ppo_trainer.ref_model.named_parameters():
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
# Finally check stats
for stat in EXPECTED_STATS:
assert stat in train_stats.keys()
        # the custom SGD optimizer was created with lr=0.01, far above the config default, so even
        # after exponential decay the reported learning rate should still exceed it
self.assertTrue(train_stats["ppo/learning_rate"] > self.ppo_config.learning_rate)
def test_ppo_step_with_no_ref(self):
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
self.gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.model_id)
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
train_stats = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
for name, param in ppo_trainer.model.named_parameters():
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
# ref model should not be trained
for name, param in ppo_trainer.ref_model.named_parameters():
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
# initialize a new gpt2 model:
model = AutoModelForCausalLMWithValueHead.from_pretrained(self.model_id)
for name, param in ppo_trainer.ref_model.named_parameters():
if "v_head" not in name:
name = name.replace("pretrained_model.", "")
self.assertTrue(
torch.allclose(param.cpu(), model.state_dict()[name].cpu()),
f"Parameter {name} has changed from the original model",
)
# Finally check stats
for stat in EXPECTED_STATS:
assert stat in train_stats.keys()
def test_ppo_step_with_no_ref_custom_layers(self):
"""
Test PPO step with no reference model and custom layers
        In the shared-layers configuration, all layers after `num_shared_layers` are treated as custom layers,
        so gradients should be computed for these layers only.
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
self.gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.model_id)
num_shared_layers = 1
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
num_shared_layers=num_shared_layers,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
train_stats = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
pattern = r".*transformer\.h\.(\d+)\..*"
final_layers = ["ln_f", "v_head", "lm_head"]
for name, param in ppo_trainer.model.named_parameters():
if re.match(pattern, name):
layer_number = int(re.match(pattern, name).groups(0)[0])
if layer_number < num_shared_layers:
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
else:
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
elif any([layer in name for layer in final_layers]):
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
# ref model should not be trained
for name, param in ppo_trainer.ref_model.named_parameters():
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
for stat in EXPECTED_STATS:
assert stat in train_stats.keys()
def test_ppo_step_with_ref_and_custom_layers_warning(self):
"""
Test PPO step with a reference model and custom layers
The trainer should raise a warning if the argument `num_shared_layers` is set
together with a reference model.
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
num_shared_layers = 6
with self.assertWarns(UserWarning):
_ = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=self.gpt2_model_ref,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
num_shared_layers=num_shared_layers,
)
def test_ppo_step_rewards_shape(self):
"""
        Test that rewards with the wrong shape raise a ValueError, while correctly shaped
        rewards are accepted.
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor([[1.0]]), torch.tensor([[0.0]])]
# train model - this should raise an error
with self.assertRaises(ValueError):
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
reward = [torch.tensor([1.0]), torch.tensor([0.0])]
# train model - this should work
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
# check if the gradients are computed for the model
for name, param in ppo_trainer.model.named_parameters():
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
# ref model should not be trained
for name, param in ppo_trainer.ref_model.named_parameters():
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
def test_ppo_step_input_shape(self):
"""
        Test that the shapes of the expected inputs are correct
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor([1.0]), torch.tensor([0.0])]
# train model - this should raise an error
bs = ppo_trainer.config.batch_size
queries, responses, _, _ = ppo_trainer._step_safety_checker(
bs, [q for q in query_tensor], [r for r in response_tensor], reward
)
self.assertTrue(isinstance(queries, list), f"queries should be a list, got {type(queries)}")
self.assertTrue(isinstance(responses, list), f"responses should be a list, got {type(responses)}")
# check the shapes
for i in range(bs):
self.assertEqual(queries[i].shape, torch.Size([7]))
self.assertEqual(responses[i].size(), torch.Size([7]))
break
def test_ppo_step_no_dataset(self):
"""
Test if the training loop works fine without passing a dataset
"""
query_txt = "This morning I went to the "
query_tensor = self.gpt2_tokenizer.encode(query_txt, return_tensors="pt")
self.ppo_config.batch_size = 1
response_tensor = respond_to_batch(self.gpt2_model, query_tensor)
# Check that this warns the user about batch size
with self.assertWarns(UserWarning):
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=self.gpt2_model_ref,
tokenizer=self.gpt2_tokenizer,
)
# train model with ppo
reward = [torch.tensor([1.0])]
# train model - this should work fine
train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)
# check gradients
for name, param in ppo_trainer.model.named_parameters():
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
# ref model should not be trained
for name, param in ppo_trainer.ref_model.named_parameters():
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
# check train stats
for stat in EXPECTED_STATS:
self.assertTrue(stat in train_stats, f"Train stats should contain {stat}")
def test_loss_trainer(self):
"""
        Test that the PPO loss computation works and is unchanged when masked positions are removed
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
self.gpt2_model.eval()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_queries = [torch.tensor([1, 2, 3, 4]), torch.tensor([1, 2, 3, 4, 5, 6, 7])]
dummy_responses = [torch.tensor([5, 6, 7, 8, 9]), torch.tensor([8, 9, 10, 11, 12, 13])]
dummy_scores = torch.Tensor([1, 2])
ppo_trainer.config.mini_batch_size = 1
ppo_trainer.config.batch_size = 1
model_inputs = ppo_trainer.prepare_model_inputs(dummy_queries, dummy_responses)
all_logprobs, _, values, mask = ppo_trainer.batched_forward_pass(
self.gpt2_model, dummy_queries, dummy_responses, model_inputs
)
# dummy values
ref_logprobs = all_logprobs + 1
logits = torch.exp(all_logprobs)
vpreds = values + 0.1
score, non_score, kls = ppo_trainer.compute_rewards(dummy_scores, all_logprobs, ref_logprobs, mask)
values, advantages, returns = ppo_trainer.compute_advantages(values, score, mask)
# just make sure a dummy loss is computed
idx = 0
pg_loss, v_loss, _ = ppo_trainer.loss(
all_logprobs[idx].unsqueeze(0),
values[idx].unsqueeze(0),
logits[idx].unsqueeze(0),
vpreds[idx].unsqueeze(0),
ref_logprobs[idx].unsqueeze(0),
mask[idx].unsqueeze(0),
advantages[idx].unsqueeze(0),
returns[idx].unsqueeze(0),
)
self.assertAlmostEqual(pg_loss.item(), 2.0494, 4)
self.assertAlmostEqual(v_loss.item(), 0.07110, 4)
# check if we get same results with masked parts removed
pg_loss_unmasked, v_loss_unmasked, _ = ppo_trainer.loss(
apply_mask(all_logprobs[idx], mask[idx]).unsqueeze(0),
apply_mask(values[idx], mask[idx]).unsqueeze(0),
apply_mask(logits[idx], mask[idx]).unsqueeze(0),
apply_mask(vpreds[idx], mask[idx]).unsqueeze(0),
apply_mask(ref_logprobs[idx], mask[idx]).unsqueeze(0),
apply_mask(mask[idx], mask[idx]).unsqueeze(0),
apply_mask(advantages[idx], mask[idx]).unsqueeze(0),
apply_mask(returns[idx], mask[idx]).unsqueeze(0),
)
self.assertAlmostEqual(pg_loss_unmasked.item(), 2.0494, 4)
self.assertAlmostEqual(v_loss_unmasked.item(), 0.07110, 4)
@parameterized.expand(
[
["gpt2"],
["bloom"],
["t5"],
]
)
def test_batched_forward_pass(self, name):
"""
        Test that the batched forward pass gives consistent results across batch and mini-batch sizes
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
dummy_queries = [torch.tensor([1, 2, 3, 4]), torch.tensor([1, 2, 3, 4, 5, 6, 7])]
dummy_responses = [torch.tensor([5, 6, 7, 8, 9]), torch.tensor([8, 9, 10, 11, 12, 13])]
if name == "gpt2":
model = self.gpt2_model
tokenizer = self.gpt2_tokenizer
elif name == "bloom":
model = self.bloom_model
tokenizer = self.bloom_tokenizer
elif name == "t5":
model = self.t5_model
tokenizer = self.t5_tokenizer
model.eval()
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=model,
ref_model=None,
tokenizer=tokenizer,
dataset=dummy_dataset,
)
# we test all combinations of fwd_bs and bs:
# if fwd_bs=bs=1: no padding is applied and only one forward pass
# if fwd_bs=1/bs=2: padding is applied and results computed in two fwd passes
# if fwd_bs=bs=2: padding is applied and results computed in one fwd pass
ppo_trainer.config.mini_batch_size = 1
ppo_trainer.config.batch_size = 1
model_inputs = ppo_trainer.prepare_model_inputs([dummy_queries[0]], [dummy_responses[0]])
logprobs_0, logits_0, values_0, mask_0 = ppo_trainer.batched_forward_pass(
model, [dummy_queries[0]], [dummy_responses[0]], model_inputs
)
ppo_trainer.config.batch_size = 2
model_inputs = ppo_trainer.prepare_model_inputs(dummy_queries, dummy_responses)
logprobs_1, logits_1, values_1, mask_1 = ppo_trainer.batched_forward_pass(
model, dummy_queries, dummy_responses, model_inputs
)
ppo_trainer.config.mini_batch_size = 2
model_inputs = ppo_trainer.prepare_model_inputs(dummy_queries, dummy_responses)
logprobs_2, logits_2, values_2, mask_2 = ppo_trainer.batched_forward_pass(
model, dummy_queries, dummy_responses, model_inputs
)
self.assertLessEqual(abs_diff_masked_tensors(logprobs_1, logprobs_2, mask_1, mask_2), 1e-4)
self.assertLessEqual(abs_diff_masked_tensors(values_1, values_2, mask_1, mask_2), 1e-4)
self.assertLessEqual(abs_diff_masked_tensors(logprobs_0, logprobs_2[:1], mask_0, mask_2[:1]), 1e-4)
self.assertLessEqual(abs_diff_masked_tensors(values_0, values_2[:1], mask_0, mask_2[:1]), 1e-4)
def test_ppo_trainer_max_grad_norm(self):
"""
Test if the `max_grad_norm` feature works as expected
"""
# initialize dataset
dummy_dataset = self._init_dummy_dataset()
self.ppo_config.max_grad_norm = 0.00001
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
# check gradients
for name, param in ppo_trainer.model.named_parameters():
self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
self.assertTrue(
torch.all(param.grad.abs() <= self.ppo_config.max_grad_norm),
f"Parameter {name} has a gradient larger than max_grad_norm",
)
def test_ppo_trainer_kl_penalty(self):
dummy_dataset = self._init_dummy_dataset()
log_probs = torch.Tensor([[0.5, 0.2, 0.1], [0.6, 0.2, 0.1]])
ref_log_probs = torch.Tensor([[0.4, 0.3, 0.0], [0.7, 0.1, 0.3]])
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
expected_output = torch.Tensor([[0.1000, -0.1000, 0.1000], [-0.1000, 0.1000, -0.2000]])
self.assertTrue(torch.allclose(ppo_trainer._kl_penalty(log_probs, ref_log_probs), expected_output))
self.ppo_config.kl_penalty = "abs"
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
expected_output = torch.Tensor([[0.1000, 0.1000, 0.1000], [0.1000, 0.1000, 0.2000]])
self.assertTrue(torch.allclose(ppo_trainer._kl_penalty(log_probs, ref_log_probs), expected_output))
self.ppo_config.kl_penalty = "mse"
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
expected_output = torch.Tensor([[0.0050, 0.0050, 0.0050], [0.0050, 0.0050, 0.0200]])
self.assertTrue(torch.allclose(ppo_trainer._kl_penalty(log_probs, ref_log_probs), expected_output))
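        # For reference, a sketch of the penalty variants exercised above (consistent with the
        # expected outputs): the default penalty is logprob - ref_logprob, "abs" is
        # |logprob - ref_logprob|, and "mse" is 0.5 * (logprob - ref_logprob) ** 2,
        # e.g. 0.5 - 0.4 = 0.1, |0.5 - 0.4| = 0.1 and 0.5 * 0.1 ** 2 = 0.005 for the first entry.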
def test_ppo_trainer_full_kl_penalty(self):
# a few more extensive tests for the full kl option as it is more involved
dummy_dataset = self._init_dummy_dataset()
self.ppo_config.kl_penalty = "full"
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
# Test on tensors for size B,S,T = (1,2,3)
# test for when the two dists are the same
log_probs = torch.Tensor(
[
[
[0.1, 0.2, 0.7],
[0.3, 0.4, 0.3],
]
]
).exp()
ref_log_probs = torch.Tensor(
[
[
[0.1, 0.2, 0.7],
[0.3, 0.4, 0.3],
]
]
).exp()
expected_output = torch.Tensor(
[[0.0, 0.0]],
)
output = ppo_trainer._kl_penalty(log_probs, ref_log_probs)
self.assertTrue(output.shape == (1, 2))
self.assertTrue(torch.allclose(output, expected_output))
# test for when the two dists are almost not overlapping
log_probs = torch.Tensor(
[
[
[0.98, 0.01, 0.01],
[0.01, 0.98, 0.01],
]
]
).log()
ref_log_probs = torch.Tensor(
[
[
[0.01, 0.01, 0.98],
[0.01, 0.01, 0.98],
]
]
).log()
expected_output = torch.Tensor(
[[4.4474, 4.4474]],
)
output = ppo_trainer._kl_penalty(log_probs, ref_log_probs)
self.assertTrue(output.shape == (1, 2))
self.assertTrue(torch.allclose(output, expected_output))
        # test a mixed case: the first position's distributions barely overlap, the second position's are identical
log_probs = torch.Tensor(
[
[
[0.49, 0.02, 0.49],
[0.49, 0.02, 0.49],
]
]
).log()
ref_log_probs = torch.Tensor(
[
[
[0.01, 0.98, 0.01],
[0.49, 0.02, 0.49],
]
]
).log()
expected_output = torch.Tensor(
[[3.7361, 0.0]],
)
output = ppo_trainer._kl_penalty(log_probs, ref_log_probs)
self.assertTrue(output.shape == (1, 2))
self.assertTrue(torch.allclose(output, expected_output, atol=1e-4))
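        # Sketch of the "full" penalty checked above: per position it is the full KL divergence
        # sum_x p(x) * (log p(x) - log p_ref(x)); for the nearly disjoint case,
        # 0.98 * ln(0.98 / 0.01) + 0.01 * ln(1) + 0.01 * ln(0.01 / 0.98) ~= 4.447, matching the test.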
@require_peft
@mark.peft_test
def test_peft_model_ppo_trainer(self):
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
gpt2_model = AutoModelForCausalLM.from_pretrained(self.model_id)
        # this line is very important: with the base model weights frozen, the embedding outputs
        # must require grad so that, once gradient checkpointing is enabled below, gradients can
        # still flow back to the LoRA and value-head parameters
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
gpt2_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
peft_model = get_peft_model(gpt2_model, lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(peft_model)
dummy_dataset = self._init_dummy_dataset()
self.ppo_config.batch_size = 2
self.ppo_config.mini_batch_size = 1
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
self.assertTrue(ppo_trainer.ref_model is None)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model by running a step twice
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
ppo_trainer.model.train()
ppo_trainer.model.gradient_checkpointing_enable()
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
# check gradients
for name, param in model.named_parameters():
if "lora" in name or "v_head" in name:
                self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
else:
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
@require_peft
@mark.peft_test
def test_peft_model_ppo_adapter_rm_trainer(self):
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification
dummy_inputs = torch.LongTensor([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
rm_lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="SEQ_CLS",
)
reward_model = AutoModelForSequenceClassification.from_pretrained(self.model_id)
reward_model = get_peft_model(reward_model, rm_lora_config)
dummy_optim = torch.optim.Adam(filter(lambda p: p.requires_grad, reward_model.parameters()), lr=1e-3)
previous_rm_logits = reward_model(dummy_inputs).logits
loss = previous_rm_logits.mean()
loss.backward()
dummy_optim.step()
reward_model.eval()
original_rm_logits = reward_model(dummy_inputs).logits
with tempfile.TemporaryDirectory() as tmpdirname:
reward_model.save_pretrained(tmpdirname)
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
gpt2_model = AutoModelForCausalLM.from_pretrained(self.model_id)
# this line is very important
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
gpt2_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
peft_model = get_peft_model(gpt2_model, lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(
peft_model,
reward_adapter=tmpdirname,
)
dummy_dataset = self._init_dummy_dataset()
self.ppo_config.batch_size = 2
self.ppo_config.mini_batch_size = 1
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
self.assertTrue(ppo_trainer.ref_model is None)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model by running a step twice
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
ppo_trainer.model.train()
ppo_trainer.model.gradient_checkpointing_enable()
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
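            # compute_reward_score should reproduce the logits of the saved reward adapter
            # (captured after the optimizer step), not the pre-step ones, confirming that the
            # trained adapter weights were actually loaded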
new_logits = ppo_trainer.model.compute_reward_score(dummy_inputs)
self.assertTrue(not torch.allclose(previous_rm_logits, new_logits[:, -1, :]))
self.assertTrue(torch.allclose(original_rm_logits, new_logits[:, -1, :]))
# check gradients
for name, param in model.named_parameters():
if ("lora" in name or "v_head" in name) and ("reward" not in name):
                    self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
else:
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
    @unittest.skip("Fix by either patching `whoami()` to work in the staging endpoint or using a dummy prod user.")
def test_push_to_hub(self):
REPO_NAME = "test-ppo-trainer"
repo_id = f"{CI_HUB_USER}/{REPO_NAME}"
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=self.gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=self._init_dummy_dataset(),
)
with tempfile.TemporaryDirectory():
url = ppo_trainer.push_to_hub(repo_id=repo_id, token=self._token, api_endpoint=CI_HUB_ENDPOINT)
# Extract repo_name from the url
re_search = re.search(CI_HUB_ENDPOINT + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
hub_repo_id = re_search.groups()[0]
# Check we created a Hub repo
self.assertEqual(hub_repo_id, repo_id)
# Ensure all files are present
files = sorted(self._api.list_repo_files(hub_repo_id))
assert all(
fnmatch.fnmatch(file, expected_file)
for file, expected_file in zip(
files,
[
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json",
],
)
)
@require_peft
@require_torch_multi_gpu
@mark.peft_test
def test_peft_model_ppo_trainer_multi_gpu(self):
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
gpt2_model = AutoModelForCausalLM.from_pretrained(
"gpt2", device_map="balanced", max_memory={0: "500MB", 1: "500MB"}
)
self.assertTrue(set(gpt2_model.hf_device_map.values()) == {0, 1})
# this line is very important
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
gpt2_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
peft_model = get_peft_model(gpt2_model, lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(peft_model)
self.assertTrue(model.is_sequential_parallel)
dummy_dataset = self._init_dummy_dataset()
self.ppo_config.batch_size = 2
self.ppo_config.mini_batch_size = 1
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
self.assertTrue(ppo_trainer.ref_model is None)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model by running a step twice
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
ppo_trainer.model.train()
ppo_trainer.model.gradient_checkpointing_enable()
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
# check gradients
for name, param in model.named_parameters():
if "lora" in name or "v_head" in name:
                self.assertTrue(param.grad is not None, f"Parameter {name} has no gradient")
else:
self.assertTrue(param.grad is None, f"Parameter {name} has a gradient")
def test_generation(self):
dummy_dataset = self._init_dummy_dataset()
model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=model,
ref_model=None,
tokenizer=tokenizer,
dataset=dummy_dataset,
)
input_texts = ["this is a test", "this is another, longer test"]
generation_kwargs = {"do_sample": False, "max_new_tokens": 4, "pad_token_id": tokenizer.eos_token_id}
tokenizer.pad_token = tokenizer.eos_token
model_inputs = [tokenizer(txt, return_tensors="pt").input_ids.squeeze() for txt in input_texts]
generations_batched = ppo_trainer.generate(model_inputs, batch_size=2, **generation_kwargs)
generations_batched = tokenizer.batch_decode(generations_batched)
generations_single = [ppo_trainer.generate(inputs, **generation_kwargs).squeeze() for inputs in model_inputs]
generations_single = tokenizer.batch_decode(generations_single)
self.assertEqual(generations_single, generations_batched)
def test_grad_accumulation(self):
dummy_dataset = self._init_dummy_dataset()
torch.manual_seed(0)
gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.model_id, summary_dropout_prob=0.0)
gpt2_model_clone = copy.deepcopy(gpt2_model)
self.ppo_config.mini_batch_size = 2
self.ppo_config.ppo_epochs = 1
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=gpt2_model,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(1.0)]
# train model by running a step twice
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
        model_weight = gpt2_model.v_head.summary.weight
self.ppo_config.mini_batch_size = 1
self.ppo_config.gradient_accumulation_steps = 2
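        # with mini_batch_size=1 and gradient_accumulation_steps=2 the optimizer should apply the
        # same effective update as a single mini-batch of 2, so the resulting v_head weights are
        # expected to match the run above within tolerance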
ppo_trainer = PPOTrainer(
config=self.ppo_config,
model=gpt2_model_clone,
ref_model=None,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(1.0)]
# train model by running a step twice
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
        model_weight_acc = gpt2_model_clone.v_head.summary.weight
        self.assertTrue(torch.allclose(model_weight_acc, model_weight, rtol=1e-3, atol=1e-3))
    @unittest.skip("Fix by either patching `whoami()` to work in the staging endpoint or using a dummy prod user.")
def test_push_to_hub_if_best_reward(self):
REPO_NAME = "test-ppo-trainer"
repo_id = f"{CI_HUB_USER}/{REPO_NAME}"
dummy_dataset = self._init_dummy_dataset()
push_to_hub_if_best_kwargs = {"repo_id": repo_id}
ppo_config = PPOConfig(
batch_size=2,
mini_batch_size=1,
log_with=None,
push_to_hub_if_best_kwargs=push_to_hub_if_best_kwargs,
compare_steps=1,
)
ppo_trainer = PPOTrainer(
config=ppo_config,
model=self.gpt2_model,
ref_model=self.gpt2_model_ref,
tokenizer=self.gpt2_tokenizer,
dataset=dummy_dataset,
)
dummy_dataloader = ppo_trainer.dataloader
# train model with ppo
for query_tensor, response_tensor in dummy_dataloader:
# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0), torch.tensor(0.0)]
# train model
_ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward)
break
def test_batch_size_check(self):
with pytest.raises(ValueError):
PPOConfig(batch_size=2, mini_batch_size=2, gradient_accumulation_steps=2)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/testing_utils.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from trl import (
is_bitsandbytes_available,
is_diffusers_available,
is_peft_available,
is_wandb_available,
is_xpu_available,
)
def require_peft(test_case):
"""
Decorator marking a test that requires peft. Skips the test if peft is not available.
"""
if not is_peft_available():
test_case = unittest.skip("test requires peft")(test_case)
return test_case
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires bnb. Skips the test if bnb is not available.
"""
if not is_bitsandbytes_available():
test_case = unittest.skip("test requires bnb")(test_case)
return test_case
def require_diffusers(test_case):
"""
Decorator marking a test that requires diffusers. Skips the test if diffusers is not available.
"""
if not is_diffusers_available():
test_case = unittest.skip("test requires diffusers")(test_case)
return test_case
def require_wandb(test_case, required: bool = True):
"""
Decorator marking a test that requires wandb. Skips the test if wandb is not available.
"""
    # XOR, i.e.:
    # skip if wandb is available but required=False, and
    # skip if wandb is not available but required=True
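    # (available, required) -> (True, True): run, (True, False): skip, (False, True): skip, (False, False): run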
if is_wandb_available() ^ required:
test_case = unittest.skip("test requires wandb")(test_case)
return test_case
def require_no_wandb(test_case):
"""
Decorator marking a test that requires no wandb. Skips the test if wandb is available.
"""
return require_wandb(test_case, required=False)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires multiple GPUs. Skips the test if there aren't enough GPUs.
"""
if torch.cuda.device_count() < 2:
test_case = unittest.skip("test requires multiple GPUs")(test_case)
return test_case
def require_torch_gpu(test_case):
"""
Decorator marking a test that requires GPUs. Skips the test if there is no GPU.
"""
if not torch.cuda.is_available():
test_case = unittest.skip("test requires GPU")(test_case)
return test_case
def require_torch_multi_xpu(test_case):
"""
Decorator marking a test that requires multiple XPUs. Skips the test if there aren't enough XPUs.
"""
    if is_xpu_available() and torch.xpu.device_count() < 2:
test_case = unittest.skip("test requires multiple XPUs")(test_case)
return test_case
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_peft_models.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import torch
from pytest import mark
from transformers import AutoModelForCausalLM
from trl import AutoModelForCausalLMWithValueHead, is_peft_available
if is_peft_available():
    from peft import LoraConfig, get_peft_model
from .testing_utils import require_bitsandbytes, require_peft
@require_peft
@mark.peft_test
class PeftModelTester(unittest.TestCase):
def setUp(self):
self.causal_lm_model_id = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM"
self.lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
def test_create_peft_model(self):
r"""
Simply creates a peft model and checks that it can be loaded.
"""
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
pretrained_model = get_peft_model(causal_lm_model, self.lora_config)
_ = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model)
def test_peft_requires_grad(self):
r"""
Check that the value head of the returned model has requires_grad=True.
"""
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
pretrained_model = get_peft_model(causal_lm_model, self.lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model)
# Check that the value head has requires_grad=True
self.assertTrue(model.v_head.summary.weight.requires_grad)
def test_check_peft_model_nb_trainable_params(self):
r"""
Check that the number of trainable parameters is correct.
"""
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
pretrained_model = get_peft_model(causal_lm_model, self.lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model)
# Check that the number of trainable parameters is correct
nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 10273)
# Check that the number of trainable param for the non-peft model is correct
non_peft_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id)
nb_trainable_params = sum(p.numel() for p in non_peft_model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 99578)
def test_create_peft_model_from_config(self):
r"""
        Creates a peft model from a `peft_config` and checks that the number of trainable parameters is correct.
"""
trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(
self.causal_lm_model_id, peft_config=self.lora_config
)
# Check that the number of trainable parameters is correct
nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 10273)
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config)
# Check that the number of trainable parameters is correct
nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 10273)
@require_bitsandbytes
def test_create_bnb_peft_model_from_config(self):
r"""
        Creates an 8-bit peft model from a `peft_config` and checks that the quantized `Linear8bitLt` layers are used.
"""
from bitsandbytes.nn import Linear8bitLt
trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(
self.causal_lm_model_id, peft_config=self.lora_config, load_in_8bit=True
)
# Check that the number of trainable parameters is correct
nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 10273)
self.assertTrue(
trl_model.pretrained_model.model.gpt_neox.layers[0].mlp.dense_h_to_4h.__class__ == Linear8bitLt
)
causal_lm_model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id, load_in_8bit=True, device_map="auto"
)
trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config)
# Check that the number of trainable parameters is correct
nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 10273)
self.assertTrue(
trl_model.pretrained_model.model.gpt_neox.layers[0].mlp.dense_h_to_4h.__class__ == Linear8bitLt
)
def test_save_pretrained_peft(self):
r"""
Check that the model can be saved and loaded properly.
"""
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
pretrained_model = get_peft_model(causal_lm_model, self.lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
# check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory
self.assertTrue(
os.path.isfile(f"{tmp_dir}/adapter_model.safetensors"),
msg=f"{tmp_dir}/adapter_model.safetensors does not exist",
)
self.assertTrue(
os.path.exists(f"{tmp_dir}/adapter_config.json"),
msg=f"{tmp_dir}/adapter_config.json does not exist",
)
# check also for `pytorch_model.bin` and make sure it only contains `v_head` weights
self.assertTrue(
os.path.exists(f"{tmp_dir}/pytorch_model.bin"),
msg=f"{tmp_dir}/pytorch_model.bin does not exist",
)
maybe_v_head = torch.load(f"{tmp_dir}/pytorch_model.bin")
            # check that only keys that start with `v_head` are in the dict
self.assertTrue(
all(k.startswith("v_head") for k in maybe_v_head.keys()),
msg=f"keys in {tmp_dir}/pytorch_model.bin do not start with `v_head`",
)
model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir)
# check all the weights are the same
for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()):
self.assertTrue(torch.allclose(p1[1], p2[1]), msg=f"{p1[0]} != {p2[0]}")
def test_load_pretrained_peft(self):
r"""
        Check that a model saved through the peft interface can be loaded properly.
"""
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
pretrained_model = get_peft_model(causal_lm_model, self.lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model)
with tempfile.TemporaryDirectory() as tmp_dir:
pretrained_model.save_pretrained(tmp_dir)
model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir)
# check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory
self.assertTrue(
os.path.isfile(f"{tmp_dir}/adapter_model.safetensors"),
msg=f"{tmp_dir}/adapter_model.safetensors does not exist",
)
self.assertTrue(
os.path.exists(f"{tmp_dir}/adapter_config.json"),
msg=f"{tmp_dir}/adapter_config.json does not exist",
)
# check all the weights are the same
for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()):
if p1[0] not in ["v_head.summary.weight", "v_head.summary.bias"]:
self.assertTrue(torch.allclose(p1[1], p2[1]), msg=f"{p1[0]} != {p2[0]}")
def test_continue_training_peft_model(self):
r"""
        Load a peft model and check that it can continue training.
"""
causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
pretrained_model = get_peft_model(causal_lm_model, self.lora_config)
with tempfile.TemporaryDirectory() as tmp_dir:
pretrained_model.save_pretrained(tmp_dir)
# set is_trainable to True
model = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir, is_trainable=True)
# Check that the number of trainable parameters is correct
nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
self.assertEqual(nb_trainable_params, 10273)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_dpo_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from datasets import Dataset
from parameterized import parameterized
from pytest import mark
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer
from .testing_utils import require_bitsandbytes, require_no_wandb, require_peft
class DPOTrainerTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id)
cls.ref_model = AutoModelForCausalLM.from_pretrained(cls.model_id)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
cls.tokenizer.pad_token = cls.tokenizer.eos_token
# get t5 as seq2seq example:
model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab"
cls.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
cls.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
cls.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)
def _init_dummy_dataset(self):
# fmt: off
dummy_dataset_dict = {
"prompt": [
"hello",
"how are you",
"What is your name?",
"What is your name?",
"Which is the best programming language?",
"Which is the best programming language?",
"Which is the best programming language?",
"[INST] How is the stock price? [/INST]",
"[INST] How is the stock price? [/INST] ",
],
"chosen": [
"hi nice to meet you",
"I am fine",
"My name is Mary",
"My name is Mary",
"Python",
"Python",
"Python",
"$46 as of 10am EST",
"46 as of 10am EST",
],
"rejected": [
"leave me alone",
"I am not fine",
"Whats it to you?",
"I dont have a name",
"Javascript",
"C++",
"Java",
" $46 as of 10am EST",
" 46 as of 10am EST",
],
}
# fmt: on
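        # the last few prompt/chosen/rejected entries differ only by whitespace at the prompt/answer
        # boundary; they appear to exercise the whitespace handling in the DPO prompt/response tokenization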
return Dataset.from_dict(dummy_dataset_dict)
@parameterized.expand(
[
["gpt2", "sigmoid", True],
["t5", "hinge", False],
["gpt2", "ipo", False],
["t5", "ipo", True],
["gpt2", "kto_pair", True],
["t5", "kto_pair", False],
]
)
def test_dpo_trainer(self, name, loss_type, pre_compute):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
evaluation_strategy="steps",
)
dummy_dataset = self._init_dummy_dataset()
if name == "gpt2":
model = self.model
ref_model = self.ref_model
tokenizer = self.tokenizer
elif name == "t5":
model = self.t5_model
ref_model = self.t5_ref_model
tokenizer = self.t5_tokenizer
trainer = DPOTrainer(
model=model,
ref_model=ref_model,
beta=0.1,
loss_type=loss_type,
args=training_args,
tokenizer=tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
precompute_ref_log_probs=pre_compute,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# check the params have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
# check the params have changed - ignore 0 biases
if param.sum() != 0:
self.assertFalse(torch.equal(param, new_param))
def test_dpo_trainer_without_providing_ref_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
)
dummy_dataset = self._init_dummy_dataset()
trainer = DPOTrainer(
model=self.model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
precompute_ref_log_probs=True,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# check the params have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
# check the params have changed - ignore 0 biases
if param.sum() != 0:
self.assertFalse(torch.equal(param, new_param))
@require_peft
@mark.peft_test
def test_dpo_trainer_without_providing_ref_model_with_lora(self):
from peft import LoraConfig
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
)
dummy_dataset = self._init_dummy_dataset()
trainer = DPOTrainer(
model=self.model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
peft_config=lora_config,
precompute_ref_log_probs=True,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# check the params have changed
for n, param in previous_trainable_params.items():
if "lora" in n:
new_param = trainer.model.get_parameter(n)
# check the params have changed - ignore 0 biases
if param.sum() != 0:
self.assertFalse(torch.equal(param, new_param))
def test_dpo_trainer_padding_token_is_none(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
evaluation_strategy="steps",
)
dummy_dataset = self._init_dummy_dataset()
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
tokenizer.pad_token = None
with self.assertRaisesRegex(
ValueError,
expected_regex=r"Padding is enabled, but the tokenizer is not configured with a padding token."
r" Explicitly set `tokenizer.pad_token` \(e.g. `tokenizer.pad_token = tokenizer.eos_token`\)"
r" before calling the trainer.",
):
trainer = DPOTrainer(
model=self.model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
)
trainer.train()
@require_no_wandb
def test_dpo_trainer_generate_during_eval_no_wandb(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
evaluation_strategy="steps",
)
dummy_dataset = self._init_dummy_dataset()
with self.assertRaisesRegex(
ValueError,
expected_regex="`generate_during_eval=True` requires Weights and Biases to be installed."
" Please install `wandb` to resolve.",
):
DPOTrainer(
model=self.model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
generate_during_eval=True,
)
@require_peft
@mark.peft_test
def test_dpo_lora_save(self):
from peft import LoraConfig, get_peft_model
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
# lora model
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model_peft = get_peft_model(model, lora_config)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
)
dummy_dataset = self._init_dummy_dataset()
# dpo train lora model with a lora config
trainer = DPOTrainer(
model=model_peft,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
peft_config=lora_config,
precompute_ref_log_probs=True,
)
# train the model
trainer.train()
# save peft adapter
trainer.save_model()
# assert that the model is loaded without giving OSError
try:
AutoModelForCausalLM.from_pretrained(tmp_dir)
except OSError:
self.fail("Loading the saved peft adapter failed")
@require_peft
@require_bitsandbytes
@mark.peft_test
def test_dpo_lora_bf16_autocast_llama(self):
        # Note: this test requires a GPU with bf16 support (compute capability >= 8.0, e.g. Ampere or newer)
from peft import LoraConfig
model_id = "HuggingFaceM4/tiny-random-LlamaForCausalLM"
tokenizer = AutoTokenizer.from_pretrained(model_id)
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
# lora model
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
bf16=True,
)
dummy_dataset = self._init_dummy_dataset()
# dpo train lora model with a lora config
trainer = DPOTrainer(
model=model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
peft_config=lora_config,
generate_during_eval=True,
)
# train the model
trainer.train()
# save peft adapter
trainer.save_model()
@parameterized.expand(
[
["gpt2", "sigmoid", False, False],
["gpt2", "sigmoid", False, True],
["gpt2", "sigmoid", True, False],
["gpt2", "sigmoid", True, True],
["gpt2", "ipo", False, False],
["gpt2", "ipo", False, True],
["gpt2", "ipo", True, False],
["gpt2", "ipo", True, True],
["gpt2", "kto_pair", False, False],
["gpt2", "kto_pair", False, True],
["gpt2", "kto_pair", True, False],
["gpt2", "kto_pair", True, True],
]
)
@require_bitsandbytes
@require_peft
@mark.peft_test
@unittest.skip("You need a GPU with bf16 support in order to run these tests")
def test_dpo_lora_bf16_autocast(self, name, loss_type, pre_compute, gen_during_eval):
        # Note: this test requires a GPU with bf16 support (compute capability >= 8.0, e.g. Ampere or newer)
from peft import LoraConfig
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
# lora model
model = AutoModelForCausalLM.from_pretrained(self.model_id, load_in_4bit=True)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
evaluation_strategy="steps",
bf16=True,
)
dummy_dataset = self._init_dummy_dataset()
# dpo train lora model with a lora config
trainer = DPOTrainer(
model=model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=self.tokenizer,
train_dataset=dummy_dataset,
eval_dataset=dummy_dataset,
peft_config=lora_config,
generate_during_eval=gen_during_eval,
loss_type=loss_type,
precompute_ref_log_probs=pre_compute,
)
# train the model
trainer.train()
# save peft adapter
trainer.save_model()
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_ddpo_trainer.py | # Copyright 2023 metric-space, The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from trl import is_diffusers_available, is_peft_available
from .testing_utils import require_diffusers
if is_diffusers_available() and is_peft_available():
from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline
def scorer_function(images, prompts, metadata):
return torch.randn(1) * 3.0, {}
def prompt_function():
return ("cabbages", {})
@require_diffusers
class DDPOTrainerTester(unittest.TestCase):
"""
Test the DDPOTrainer class.
"""
def setUp(self):
self.ddpo_config = DDPOConfig(
num_epochs=2,
train_gradient_accumulation_steps=1,
per_prompt_stat_tracking_buffer_size=32,
sample_num_batches_per_epoch=2,
sample_batch_size=2,
mixed_precision=None,
save_freq=1000000,
)
pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch"
pretrained_revision = "main"
pipeline = DefaultDDPOStableDiffusionPipeline(
pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=False
)
self.trainer = DDPOTrainer(self.ddpo_config, scorer_function, prompt_function, pipeline)
return super().setUp()
def tearDown(self) -> None:
gc.collect()
def test_loss(self):
advantage = torch.tensor([-1.0])
clip_range = 0.0001
ratio = torch.tensor([1.0])
loss = self.trainer.loss(advantage, clip_range, ratio)
self.assertEqual(loss.item(), 1.0)
def test_generate_samples(self):
samples, output_pairs = self.trainer._generate_samples(1, 2)
self.assertEqual(len(samples), 1)
self.assertEqual(len(output_pairs), 1)
self.assertEqual(len(output_pairs[0][0]), 2)
def test_calculate_loss(self):
samples, _ = self.trainer._generate_samples(1, 2)
sample = samples[0]
latents = sample["latents"][0, 0].unsqueeze(0)
next_latents = sample["next_latents"][0, 0].unsqueeze(0)
log_probs = sample["log_probs"][0, 0].unsqueeze(0)
timesteps = sample["timesteps"][0, 0].unsqueeze(0)
prompt_embeds = sample["prompt_embeds"]
advantage = torch.tensor([1.0], device=prompt_embeds.device)
self.assertEqual(latents.shape, (1, 4, 64, 64))
self.assertEqual(next_latents.shape, (1, 4, 64, 64))
self.assertEqual(log_probs.shape, (1,))
self.assertEqual(timesteps.shape, (1,))
self.assertEqual(prompt_embeds.shape, (2, 77, 32))
loss, approx_kl, clipfrac = self.trainer.calculate_loss(
latents, timesteps, next_latents, log_probs, advantage, prompt_embeds
)
self.assertTrue(torch.isfinite(loss.cpu()))
@require_diffusers
class DDPOTrainerWithLoRATester(DDPOTrainerTester):
"""
    Test the DDPOTrainer class with LoRA enabled in the pipeline.
"""
def setUp(self):
self.ddpo_config = DDPOConfig(
num_epochs=2,
train_gradient_accumulation_steps=1,
per_prompt_stat_tracking_buffer_size=32,
sample_num_batches_per_epoch=2,
sample_batch_size=2,
mixed_precision=None,
save_freq=1000000,
)
pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch"
pretrained_revision = "main"
pipeline = DefaultDDPOStableDiffusionPipeline(
pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=True
)
self.trainer = DDPOTrainer(self.ddpo_config, scorer_function, prompt_function, pipeline)
return super().setUp()
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_e2e.py | import subprocess
def test_hello_world():
subprocess.run(
"python examples/hello_world.py",
shell=True,
check=True,
)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_sft_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import tempfile
import unittest
import numpy as np
import torch
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer
from trl.import_utils import is_peft_available
from trl.trainer import ConstantLengthDataset, DataCollatorForCompletionOnlyLM
from .testing_utils import require_peft
def formatting_prompts_func(example):
text = f"### Question: {example['question']}\n ### Answer: {example['answer']}"
return text
def formatting_prompts_func_batched(example):
output_text = []
for i, question in enumerate(example["question"]):
text = f"### Question: {question}\n ### Answer: {example['answer'][i]}"
output_text.append(text)
return output_text
if is_peft_available():
from peft import LoraConfig, PeftModel
class SFTTrainerTester(unittest.TestCase):
r""" """
@classmethod
def setUpClass(cls):
cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab"
cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id)
cls.tokenizer.pad_token = cls.tokenizer.eos_token
cls.dummy_dataset = Dataset.from_dict(
{
"question": [
"Does llamas know how to code?",
"Does llamas know how to fly?",
"Does llamas know how to talk?",
"Does llamas know how to code?",
"Does llamas know how to fly?",
"Does llamas know how to talk?",
"Does llamas know how to swim?",
],
"answer": [
"Yes, llamas are very good at coding.",
"No, llamas can't fly.",
"Yes, llamas are very good at talking.",
"Yes, llamas are very good at coding.",
"No, llamas can't fly.",
"Yes, llamas are very good at talking.",
"No, llamas can't swim.",
],
"text": [
"### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.",
"### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.",
"### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.",
"### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.",
"### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.",
"### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.",
"### Question: Does llamas know how to swim?\n ### Answer: No, llamas can't swim.",
],
}
)
cls.dummy_chatml_dataset = Dataset.from_dict(
{
"messages": [
[
{"role": "system", "content": "You are helpful"},
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi, how can I help you?"},
{"role": "user", "content": "What is 2+2?"},
{"role": "assistant", "content": "4"},
{"role": "user", "content": "What is 3+3?"},
{"role": "assistant", "content": "6"},
],
[
{"role": "system", "content": "You are helpful"},
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi, how can I help you?"},
],
]
}
)
cls.dummy_instruction_dataset = Dataset.from_list(
[
{"prompt": "What is 2+2?", "completion": "4"},
{"prompt": "What is 3+3?", "completion": "6"},
{"prompt": "What is 4+4?", "completion": "8"},
{"prompt": "What is 2+2?", "completion": "4"},
{"prompt": "What is 3+3?", "completion": "6"},
{"prompt": "What is 4+4?", "completion": "8"},
{"prompt": "What is 2+2?", "completion": "4"},
{"prompt": "What is 3+3?", "completion": "6"},
{"prompt": "What is 4+4?", "completion": "8"},
{"prompt": "What is 2+2?", "completion": "4"},
{"prompt": "What is 3+3?", "completion": "6"},
{"prompt": "What is 4+4?", "completion": "8"},
]
)
cls.train_dataset = ConstantLengthDataset(
cls.tokenizer,
cls.dummy_dataset,
dataset_text_field=None,
formatting_func=formatting_prompts_func,
seq_length=16,
num_of_sequences=16,
)
cls.eval_dataset = ConstantLengthDataset(
cls.tokenizer,
cls.dummy_dataset,
dataset_text_field=None,
formatting_func=formatting_prompts_func,
seq_length=16,
num_of_sequences=16,
)
def test_constant_length_dataset(self):
formatted_dataset = ConstantLengthDataset(
self.tokenizer,
self.dummy_dataset,
dataset_text_field=None,
formatting_func=formatting_prompts_func,
)
self.assertTrue(len(formatted_dataset) == len(self.dummy_dataset))
self.assertTrue(len(formatted_dataset) > 0)
for example in formatted_dataset:
self.assertTrue("input_ids" in example)
self.assertTrue("labels" in example)
self.assertTrue(len(example["input_ids"]) == formatted_dataset.seq_length)
self.assertTrue(len(example["labels"]) == formatted_dataset.seq_length)
decoded_text = self.tokenizer.decode(example["input_ids"])
self.assertTrue(("Question" in decoded_text) and ("Answer" in decoded_text))
def test_sft_trainer(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=4,
eval_steps=2,
save_steps=2,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model_id,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
def test_sft_trainer_uncorrect_data(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
per_device_train_batch_size=2,
)
with self.assertRaises(ValueError):
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
packing=True,
)
# this should work since the dummy chatml include the correct format
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_chatml_dataset,
max_seq_length=32, # make sure there is at least 1 packed sequence
num_of_sequences=32,
packing=True,
)
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_chatml_dataset,
packing=False,
)
# this should work since the dummy instruction dataset is the correct format
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_instruction_dataset,
max_seq_length=16, # make sure there is at least 1 packed sequence
packing=True,
)
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_instruction_dataset,
packing=False,
)
# This should work
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
formatting_func=formatting_prompts_func,
max_seq_length=32, # make sure there is at least 1 packed sequence
packing=True,
)
with self.assertRaises(ValueError):
# This should not work because not enough data for one sample
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
formatting_func=formatting_prompts_func,
max_seq_length=1024, # make sure there is NOT at least 1 packed sequence
packing=True,
)
# This should not work as well
with self.assertRaises(ValueError):
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
formatting_func=formatting_prompts_func,
packing=False,
)
# but this should work
_ = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
formatting_func=formatting_prompts_func_batched,
packing=False,
)
def test_sft_trainer_with_model_num_train_epochs(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
num_train_epochs=2,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
save_steps=1,
num_train_epochs=2,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
dataset_text_field="text",
max_seq_length=16,
num_of_sequences=16,
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
save_steps=1,
num_train_epochs=2,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
dataset_text_field="text",
max_seq_length=16,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1"))
def test_sft_trainer_with_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
dataset_text_field="text",
max_seq_length=16,
num_of_sequences=16,
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
# with formatting_func + packed
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
formatting_func=formatting_prompts_func,
max_seq_length=16,
num_of_sequences=16,
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
# with formatting_func + packed
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
formatting_func=formatting_prompts_func_batched,
max_seq_length=16,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.dummy_dataset,
dataset_text_field="text",
max_seq_length=16,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1"))
def test_sft_trainer_with_multiple_eval_datasets(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=1,
eval_steps=1,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model_id,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset={
"data1": self.eval_dataset,
"data2": self.eval_dataset,
},
packing=True,
)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_data1_loss"])
self.assertIsNotNone(trainer.state.log_history[1]["eval_data2_loss"])
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1"))
def test_data_collator_completion_lm(self):
response_template = "### Response:\n"
data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=self.tokenizer, mlm=False)
text = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly."""
encoded_text = self.tokenizer(text)
examples = [encoded_text]
batch = data_collator(examples)
labels = batch["labels"]
last_pad_idx = np.where(labels == -100)[1][-1]
result_text = self.tokenizer.decode(batch["input_ids"][0, last_pad_idx + 1 :])
self.assertEqual(result_text, "I have not been masked correctly.")
def test_data_collator_completion_lm_with_multiple_text(self):
tokenizer = copy.deepcopy(self.tokenizer)
tokenizer.padding_side = "left"
response_template = "### Response:\n"
data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer, mlm=False)
text1 = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly."""
text2 = """\n\n### Instructions:\nThis is another longer text that should also be masked. This text is significantly longer than the previous one.\n\n### Response:\nI have not been masked correctly."""
encoded_text1 = tokenizer(text1)
encoded_text2 = tokenizer(text2)
examples = [encoded_text1, encoded_text2]
batch = data_collator(examples)
for i in range(2):
labels = batch["labels"][i]
last_pad_idx = np.where(labels == -100)[0][-1]
result_text = tokenizer.decode(batch["input_ids"][i, last_pad_idx + 1 :])
self.assertEqual(result_text, "I have not been masked correctly.")
def test_data_collator_chat_completion_lm(self):
instruction_template = "### Human:"
assistant_template = "### Assistant:"
data_collator = DataCollatorForCompletionOnlyLM(
response_template=assistant_template,
instruction_template=instruction_template,
tokenizer=self.tokenizer,
mlm=False,
)
text = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too."""
encoded_text = self.tokenizer(text)
examples = [encoded_text]
batch = data_collator(examples)
labels = batch["labels"]
non_masked_tokens = batch["input_ids"][labels != -100]
result_text = self.tokenizer.decode(non_masked_tokens)
self.assertEqual(result_text, " I should not be masked. I should not be masked too.")
def test_data_collator_chat_completion_lm_with_multiple_text(self):
tokenizer = copy.deepcopy(self.tokenizer)
tokenizer.padding_side = "left"
instruction_template = "### Human:"
assistant_template = "### Assistant:"
data_collator = DataCollatorForCompletionOnlyLM(
response_template=assistant_template,
instruction_template=instruction_template,
tokenizer=tokenizer,
mlm=False,
)
text1 = """### Human: Hello all this should be masked.### Assistant: I should not be masked."""
text2 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too."""
encoded_text1 = tokenizer(text1)
encoded_text2 = tokenizer(text2)
examples = [encoded_text1, encoded_text2]
batch = data_collator(examples)
labels = batch["labels"]
input_ids = batch["input_ids"]
non_masked_tokens1 = input_ids[0][labels[0] != -100]
result_text1 = tokenizer.decode(non_masked_tokens1)
self.assertEqual(result_text1, " I should not be masked.")
non_masked_tokens2 = input_ids[1][labels[1] != -100]
result_text2 = tokenizer.decode(non_masked_tokens2)
self.assertEqual(result_text2, " I should not be masked. I should not be masked too.")
def test_sft_trainer_infinite_with_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=5,
eval_steps=1,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=True,
max_seq_length=500,
)
self.assertTrue(trainer.train_dataset.infinite)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
# make sure the trainer did 5 steps
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-5"))
def test_sft_trainer_infinite_with_model_epochs(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
num_train_epochs=1,
per_device_train_batch_size=2,
save_strategy="epoch",
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=True,
max_seq_length=500,
)
self.assertFalse(trainer.train_dataset.infinite)
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
            # make sure the trainer ran the full epoch (4 steps with this dataset)
self.assertTrue("model.safetensors" in os.listdir(tmp_dir + "/checkpoint-4"))
def test_sft_trainer_with_model_neftune(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=2,
eval_steps=1,
save_steps=1,
per_device_train_batch_size=2,
)
trainer = SFTTrainer(
model=self.model,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
neftune_noise_alpha=5,
packing=True,
)
trainer.model = trainer._trl_activate_neftune(trainer.model)
device = trainer.model.get_input_embeddings().weight.device
trainer.model.train()
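            # NEFTune injects fresh random noise into the input embeddings (via a forward hook)
            # while the model is in train mode, so two forward passes under different seeds
            # should yield different embeddings.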
torch.random.manual_seed(42)
embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device))
torch.random.manual_seed(24)
embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device))
self.assertFalse(torch.allclose(embeds_neftune, embeds_neftune_2))
self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) > 0)
trainer.neftune_hook_handle.remove()
trainer.train()
# Make sure forward pass works fine
_ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device))
self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0)
@require_peft
def test_peft_sft_trainer(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=4,
eval_steps=2,
save_steps=2,
per_device_train_batch_size=2,
)
peft_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
trainer = SFTTrainer(
model=self.model_id,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
peft_config=peft_config,
packing=True,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
self.assertTrue("adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
self.assertTrue("adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2"))
self.assertTrue("model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2"))
@require_peft
def test_peft_sft_trainer_gc(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=4,
eval_steps=2,
save_steps=2,
per_device_train_batch_size=2,
gradient_checkpointing=True,
)
peft_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
trainer = SFTTrainer(
model=self.model_id,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
peft_config=peft_config,
packing=True,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
self.assertTrue("adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
self.assertTrue("adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2"))
self.assertTrue("model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2"))
@require_peft
def test_peft_sft_trainer_neftune(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
dataloader_drop_last=True,
evaluation_strategy="steps",
max_steps=4,
eval_steps=2,
save_steps=2,
per_device_train_batch_size=2,
)
peft_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
trainer = SFTTrainer(
model=self.model_id,
args=training_args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
peft_config=peft_config,
neftune_noise_alpha=5,
packing=True,
)
trainer.model = trainer._trl_activate_neftune(trainer.model)
self.assertTrue(isinstance(trainer.model, PeftModel))
device = trainer.model.get_input_embeddings().weight.device
trainer.model.train()
torch.random.manual_seed(42)
embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device))
torch.random.manual_seed(24)
embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device))
self.assertFalse(torch.allclose(embeds_neftune, embeds_neftune_2))
self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) > 0)
trainer.neftune_hook_handle.remove()
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"])
self.assertTrue("adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2"))
self.assertTrue("adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2"))
self.assertTrue("model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2"))
# Make sure forward pass works fine to check if embeddings forward is not broken.
_ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device))
self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_dataset_formatting.py | import unittest
from typing import Callable
from datasets import Dataset, load_dataset
from transformers import AutoTokenizer
from trl.extras.dataset_formatting import get_formatting_func_from_dataset
class DatasetFormattingTestCase(unittest.TestCase):
def setUp(self):
self.llama_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
self.chatml_tokenizer = AutoTokenizer.from_pretrained("philschmid/gpt2-chatml-tokenizer")
def test_get_formatting_func_from_dataset_with_chatml_messages(self):
dataset = Dataset.from_dict(
{
"messages": [
[
{"role": "system", "content": "You are helpful"},
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi, how can I help you?"},
]
]
}
)
# Llama tokenizer
formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer)
self.assertTrue(isinstance(formatting_func, Callable))
formatted_text = formatting_func(dataset[0])
self.assertEqual(
formatted_text,
"<s>[INST] <<SYS>>\nYou are helpful\n<</SYS>>\n\nHello [/INST] Hi, how can I help you? </s>",
)
formatted_text = formatting_func(dataset[0:1])
self.assertEqual(
formatted_text,
["<s>[INST] <<SYS>>\nYou are helpful\n<</SYS>>\n\nHello [/INST] Hi, how can I help you? </s>"],
)
# ChatML tokenizer
formatting_func = get_formatting_func_from_dataset(dataset, self.chatml_tokenizer)
formatted_text = formatting_func(dataset[0])
self.assertEqual(
formatted_text,
"<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n",
)
formatted_text = formatting_func(dataset[0:1])
self.assertEqual(
formatted_text,
[
"<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n"
],
)
def test_get_formatting_func_from_dataset_with_chatml_conversations(self):
dataset = Dataset.from_dict(
{
"conversations": [
[
{"role": "system", "content": "You are helpful"},
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi, how can I help you?"},
]
]
}
)
# Llama tokenizer
formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer)
self.assertTrue(isinstance(formatting_func, Callable))
formatted_text = formatting_func(dataset[0])
self.assertEqual(
formatted_text,
"<s>[INST] <<SYS>>\nYou are helpful\n<</SYS>>\n\nHello [/INST] Hi, how can I help you? </s>",
)
formatted_text = formatting_func(dataset[0:1])
self.assertEqual(
formatted_text,
["<s>[INST] <<SYS>>\nYou are helpful\n<</SYS>>\n\nHello [/INST] Hi, how can I help you? </s>"],
)
# ChatML tokenizer
formatting_func = get_formatting_func_from_dataset(dataset, self.chatml_tokenizer)
formatted_text = formatting_func(dataset[0])
self.assertEqual(
formatted_text,
"<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n",
)
formatted_text = formatting_func(dataset[0:1])
self.assertEqual(
formatted_text,
[
"<|im_start|>system\nYou are helpful<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi, how can I help you?<|im_end|>\n"
],
)
def test_get_formatting_func_from_dataset_with_instruction(self):
dataset = Dataset.from_list(
[{"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}]
)
formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer)
self.assertIsNotNone(formatting_func)
self.assertTrue(isinstance(formatting_func, Callable))
formatted_text = formatting_func(dataset[0])
self.assertEqual(formatted_text, "<s>[INST] What is 2+2? [/INST] 4 </s>")
formatted_text = formatting_func(dataset[0:1])
self.assertEqual(formatted_text, ["<s>[INST] What is 2+2? [/INST] 4 </s>"])
def test_get_formatting_func_from_dataset_from_hub(self):
ds_1 = load_dataset("philschmid/trl-test-instruction", split="train")
ds_2 = load_dataset("philschmid/dolly-15k-oai-style", split="train")
for ds in [ds_1, ds_2]:
formatting_func = get_formatting_func_from_dataset(ds, self.llama_tokenizer)
self.assertIsNotNone(formatting_func)
self.assertTrue(isinstance(formatting_func, Callable))
ds_3 = load_dataset("philschmid/guanaco-sharegpt-style", split="train")
formatting_func = get_formatting_func_from_dataset(ds_3, self.llama_tokenizer)
self.assertIsNone(formatting_func)
def test_get_formatting_func_from_dataset_with_unknown_format(self):
dataset = Dataset.from_dict({"text": "test"})
formatting_func = get_formatting_func_from_dataset(dataset, self.llama_tokenizer)
self.assertIsNone(formatting_func)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/tests/test_core.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from trl.core import masked_mean, masked_var, masked_whiten, whiten
class CoreTester(unittest.TestCase):
"""
A wrapper class for testing core utils functions
"""
@classmethod
def setUpClass(cls):
cls.test_input = torch.Tensor([1, 2, 3, 4])
cls.test_mask = torch.Tensor([0, 1, 1, 0])
cls.test_input_unmasked = cls.test_input[1:3]
def test_masked_mean(self):
self.assertEqual(torch.mean(self.test_input_unmasked), masked_mean(self.test_input, self.test_mask))
def test_masked_var(self):
self.assertEqual(torch.var(self.test_input_unmasked), masked_var(self.test_input, self.test_mask))
def test_masked_whiten(self):
whiten_unmasked = whiten(self.test_input_unmasked)
whiten_masked = masked_whiten(self.test_input, self.test_mask)[1:3]
diffs = (whiten_unmasked - whiten_masked).sum()
self.assertAlmostEqual(diffs, 0)
| 0 |
hf_public_repos/trl/tests | hf_public_repos/trl/tests/slow/testing_constants.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: push them under trl-org
MODELS_TO_TEST = [
"HuggingFaceM4/tiny-random-LlamaForCausalLM",
"HuggingFaceM4/tiny-random-MistralForCausalLM",
]
# These could be inlined where used, but keeping them as named constants makes the test matrix explicit
PACKING_OPTIONS = [True, False]
GRADIENT_CHECKPOINTING_KWARGS = [None, {"use_reentrant": False}, {"use_reentrant": True}]
DEVICE_MAP_OPTIONS = [{"": 0}, "auto"]
DPO_LOSS_TYPES = ["sigmoid", "ipo", "kto_pair"]
DPO_PRECOMPUTE_LOGITS = [True, False]
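# These lists are combined with itertools.product(...) in tests/slow/test_dpo_slow.py and
# tests/slow/test_sft_slow.py to build the parameterized slow-test matrix.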
| 0 |
hf_public_repos/trl/tests | hf_public_repos/trl/tests/slow/test_dpo_slow.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import itertools
import tempfile
import unittest
import torch
from accelerate.utils.memory import release_memory
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments
from trl import DPOTrainer, is_peft_available
from ..testing_utils import require_bitsandbytes, require_peft, require_torch_gpu, require_torch_multi_gpu
from .testing_constants import DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST
if is_peft_available():
from peft import LoraConfig, PeftModel
@require_torch_gpu
class DPOTrainerSlowTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dataset = load_dataset("trl-internal-testing/mlabonne-chatml-dpo-pairs-copy", split="train[:10%]")
cls.peft_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=8,
bias="none",
task_type="CAUSAL_LM",
)
cls.max_length = 128
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
gc.collect()
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS)))
def test_dpo_bare_model(self, model_id, loss_type, pre_compute_logits):
"""
        Tests basic usage of `DPOTrainer` with a bare (non-PEFT) model and fp16 mixed precision.
"""
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=2,
remove_unused_columns=False,
gradient_accumulation_steps=2,
learning_rate=9e-1,
evaluation_strategy="steps",
fp16=True,
logging_strategy="no",
report_to="none",
)
# dpo train lora model
trainer = DPOTrainer(
model=model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=tokenizer,
train_dataset=self.dataset,
eval_dataset=self.dataset,
loss_type=loss_type,
precompute_ref_log_probs=pre_compute_logits,
max_length=self.max_length,
)
# train the model
trainer.train()
# save trained model or adapter
trainer.save_model()
release_memory(model, trainer)
@parameterized.expand(
list(
itertools.product(
MODELS_TO_TEST,
DPO_LOSS_TYPES,
DPO_PRECOMPUTE_LOGITS,
GRADIENT_CHECKPOINTING_KWARGS,
)
)
)
@require_peft
def test_dpo_peft_model(self, model_id, loss_type, pre_compute_logits, gradient_checkpointing_kwargs):
"""
        Tests `DPOTrainer` with a PEFT (LoRA) model, fp16 mixed precision, and different gradient checkpointing settings.
"""
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=2,
remove_unused_columns=False,
gradient_accumulation_steps=2,
learning_rate=9e-1,
evaluation_strategy="steps",
fp16=True,
logging_strategy="no",
report_to="none",
gradient_checkpointing=True,
gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
)
# dpo train lora model
trainer = DPOTrainer(
model=model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=tokenizer,
train_dataset=self.dataset,
eval_dataset=self.dataset,
generate_during_eval=False,
loss_type=loss_type,
precompute_ref_log_probs=pre_compute_logits,
peft_config=self.peft_config,
max_length=self.max_length,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
self.assertTrue(trainer.ref_model is None)
# train the model
trainer.train()
# save trained model or adapter
trainer.save_model()
release_memory(model, trainer)
@parameterized.expand(
list(
itertools.product(
MODELS_TO_TEST,
DPO_LOSS_TYPES,
DPO_PRECOMPUTE_LOGITS,
GRADIENT_CHECKPOINTING_KWARGS,
)
)
)
@require_bitsandbytes
@require_peft
def test_dpo_peft_model_qlora(self, model_id, loss_type, pre_compute_logits, gradient_checkpointing_kwargs):
"""
        Tests `DPOTrainer` with QLoRA and different gradient checkpointing settings.
"""
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained(model_id)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=2,
remove_unused_columns=False,
gradient_accumulation_steps=2,
learning_rate=9e-1,
evaluation_strategy="steps",
fp16=True,
logging_strategy="no",
report_to="none",
gradient_checkpointing=True,
gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
)
# dpo train lora model
trainer = DPOTrainer(
model=model,
ref_model=None,
beta=0.1,
args=training_args,
tokenizer=tokenizer,
train_dataset=self.dataset,
eval_dataset=self.dataset,
generate_during_eval=False,
loss_type=loss_type,
precompute_ref_log_probs=pre_compute_logits,
peft_config=self.peft_config,
max_length=self.max_length,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
self.assertTrue(trainer.ref_model is None)
# train the model
trainer.train()
# save trained model or adapter
trainer.save_model()
release_memory(model, trainer)
@require_torch_multi_gpu
class DPOTrainerSlowTesterMultiGPU(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dataset = load_dataset("trl-internal-testing/mlabonne-chatml-dpo-pairs-copy", split="train[:10%]")
cls.peft_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=8,
bias="none",
task_type="CAUSAL_LM",
)
cls.max_length = 128
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
gc.collect()
@parameterized.expand(
list(
itertools.product(
MODELS_TO_TEST,
DPO_LOSS_TYPES,
DPO_PRECOMPUTE_LOGITS,
)
)
)
@require_bitsandbytes
@require_peft
def test_dpo_peft_model_qlora_multi(self, model_id, loss_type, pre_compute_logits):
"""
        Tests `DPOTrainer` with QLoRA and an explicit reference model on multiple GPUs.
"""
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
        # Currently accelerate does not let you place, say, the reference and active model
        # on different devices. The canonical workaround is to either load them on the same device
        # or just use a single model with PEFT.
model = AutoModelForCausalLM.from_pretrained(
model_id, quantization_config=quantization_config, device_map={"": 0}
)
ref_model = AutoModelForCausalLM.from_pretrained(
model_id, quantization_config=quantization_config, device_map={"": 0}
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=2,
remove_unused_columns=False,
gradient_accumulation_steps=2,
learning_rate=9e-1,
evaluation_strategy="steps",
fp16=True,
logging_strategy="no",
report_to="none",
gradient_checkpointing=True,
)
# dpo train lora model
trainer = DPOTrainer(
model=model,
ref_model=ref_model,
beta=0.1,
args=training_args,
tokenizer=tokenizer,
train_dataset=self.dataset,
eval_dataset=self.dataset,
generate_during_eval=False,
loss_type=loss_type,
precompute_ref_log_probs=pre_compute_logits,
peft_config=self.peft_config,
max_length=self.max_length,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
# train the model
trainer.train()
# save trained model or adapter
trainer.save_model()
release_memory(model, trainer)
| 0 |
hf_public_repos/trl/tests | hf_public_repos/trl/tests/slow/test_sft_slow.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import itertools
import tempfile
import unittest
import torch
from accelerate.utils.memory import release_memory
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments
from trl import SFTTrainer, is_peft_available
from ..testing_utils import require_bitsandbytes, require_peft, require_torch_gpu, require_torch_multi_gpu
from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS
if is_peft_available():
from peft import LoraConfig, PeftModel
@require_torch_gpu
class SFTTrainerSlowTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.train_dataset = load_dataset("imdb", split="train[:10%]")
cls.eval_dataset = load_dataset("imdb", split="test[:10%]")
cls.dataset_text_field = "text"
cls.max_seq_length = 128
cls.peft_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=8,
bias="none",
task_type="CAUSAL_LM",
)
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
gc.collect()
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
def test_sft_trainer_str(self, model_name, packing):
"""
        Tests that passing a model name string to `SFTTrainer` loads and runs the trainer
        as expected.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
)
trainer = SFTTrainer(
model_name,
args=args,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
)
trainer.train()
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
def test_sft_trainer_transformers(self, model_name, packing):
"""
Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
as expected.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
)
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
)
trainer.train()
release_memory(model, trainer)
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
@require_peft
def test_sft_trainer_peft(self, model_name, packing):
"""
Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer
as expected.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
fp16=True,
)
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
peft_config=self.peft_config,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
trainer.train()
release_memory(model, trainer)
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
def test_sft_trainer_transformers_mp(self, model_name, packing):
"""
Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
as expected in mixed precision.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
fp16=True, # this is sufficient to enable amp
)
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
)
trainer.train()
release_memory(model, trainer)
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS)))
def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs):
"""
Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
as expected in mixed precision + different scenarios of gradient_checkpointing.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
fp16=True, # this is sufficient to enable amp
gradient_checkpointing=True,
gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
)
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
)
trainer.train()
release_memory(model, trainer)
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS)))
@require_peft
def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs):
"""
Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer
as expected in mixed precision + different scenarios of gradient_checkpointing.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
fp16=True, # this is sufficient to enable amp
gradient_checkpointing=True,
gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
)
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
peft_config=self.peft_config,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
trainer.train()
release_memory(model, trainer)
@parameterized.expand(
list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS))
)
@require_torch_multi_gpu
def test_sft_trainer_transformers_mp_gc_device_map(
self, model_name, packing, gradient_checkpointing_kwargs, device_map
):
"""
Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc).
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
fp16=True, # this is sufficient to enable amp
gradient_checkpointing=True,
gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
)
trainer.train()
release_memory(model, trainer)
@parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS)))
@require_peft
@require_bitsandbytes
def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs):
"""
Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer
as expected in mixed precision + different scenarios of gradient_checkpointing.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
output_dir=tmp_dir,
logging_strategy="no",
report_to="none",
per_device_train_batch_size=2,
max_steps=10,
fp16=True, # this is sufficient to enable amp
gradient_checkpointing=True,
gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
)
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained(model_name)
trainer = SFTTrainer(
model,
args=args,
tokenizer=tokenizer,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
packing=packing,
dataset_text_field=self.dataset_text_field,
max_seq_length=self.max_seq_length,
peft_config=self.peft_config,
)
self.assertTrue(isinstance(trainer.model, PeftModel))
trainer.train()
release_memory(model, trainer)
| 0 |
hf_public_repos/trl | hf_public_repos/trl/trl/core.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import warnings
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from transformers import top_k_top_p_filtering
from .import_utils import is_npu_available, is_xpu_available
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
WANDB_PADDING = -1
def flatten_dict(nested: Dict, sep: str = "/") -> Dict:
"""Flatten dictionary and concatenate nested keys with separator."""
def recurse(nest: Dict, prefix: str, into: Dict) -> None:
for k, v in nest.items():
if sep in k:
raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'")
if isinstance(v, Mapping):
recurse(v, prefix + k + sep, into)
else:
into[prefix + k] = v
flat = {}
recurse(nested, "", flat)
return flat
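# Usage sketch for `flatten_dict` (illustrative, not part of the library): nested
# keys are joined with the separator, so a config-like dict becomes a flat logging dict.
#
#     >>> flatten_dict({"ppo": {"lr": 1e-05, "kl": {"coef": 0.2}}, "seed": 0})
#     {'ppo/lr': 1e-05, 'ppo/kl/coef': 0.2, 'seed': 0}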
def convert_to_scalar(stats: Dict) -> Dict:
"""
Converts the stats from a flattened dict to single scalar dicts
"""
tensorboard_stats = {}
for k, v in stats.items():
# for tensorboard compatibility - arrays and tensors are ignored with tensorboard
# therefore we convert single element tensors to scalars
if (isinstance(v, torch.Tensor) or isinstance(v, np.ndarray)) and (
len(v.shape) == 0 or (len(v.shape) == 1 and v.shape[0] == 1)
):
v = v.item()
tensorboard_stats[k] = v
return tensorboard_stats
def stack_dicts(stats_dicts: List[Dict]) -> Dict:
"""Stack the values of a dict."""
results = dict()
for k in stats_dicts[0]:
stats_list = [torch.flatten(d[k]) for d in stats_dicts]
results[k] = pad_sequence(stats_list, batch_first=True, padding_value=WANDB_PADDING)
return results
def add_suffix(input_dict: Dict, suffix: str) -> Dict:
"""Add suffix to dict keys."""
return dict((k + suffix, v) for k, v in input_dict.items())
def pad_to_size(tensor: torch.Tensor, size: int, dim: int = 1, padding: int = 50256) -> torch.Tensor:
"""Pad tensor to size."""
t_size = tensor.size()[dim]
if t_size == size:
return tensor
else:
return torch.nn.functional.pad(tensor, (0, size - t_size), "constant", padding)
def logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor, gather: bool = True) -> torch.Tensor:
"""
See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
"""
logp = F.log_softmax(logits, dim=2)
if not gather:
return logp
logpy = torch.gather(logp, 2, labels.unsqueeze(2)).squeeze(-1)
return logpy
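# Shape sketch for `logprobs_from_logits` (illustrative): logits of shape
# (batch, seq_len, vocab) gathered at labels of shape (batch, seq_len) give
# per-token log-probabilities of shape (batch, seq_len).
#
#     >>> logits = torch.randn(2, 5, 50257)
#     >>> labels = torch.randint(0, 50257, (2, 5))
#     >>> logprobs_from_logits(logits, labels).shape
#     torch.Size([2, 5])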
def whiten(values: torch.Tensor, shift_mean: bool = True) -> torch.Tensor:
"""Whiten values."""
mean, var = torch.mean(values), torch.var(values)
whitened = (values - mean) * torch.rsqrt(var + 1e-8)
if not shift_mean:
whitened += mean
return whitened
def masked_mean(values: torch.Tensor, mask: torch.Tensor, axis: Optional[int] = None) -> torch.Tensor:
    """Compute the mean of a tensor over the entries selected by the mask."""
if axis is not None:
return (values * mask).sum(axis=axis) / mask.sum(axis=axis)
else:
return (values * mask).sum() / mask.sum()
def masked_var(values: torch.Tensor, mask: torch.Tensor, unbiased: bool = True) -> torch.Tensor:
"""Compute variance of tensor with masked values."""
mean = masked_mean(values, mask)
centered_values = values - mean
variance = masked_mean(centered_values**2, mask)
if unbiased:
mask_sum = mask.sum()
if mask_sum == 0:
raise ValueError(
"The sum of the mask is zero, which can happen when `mini_batch_size=1`;"
"try increase the `mini_batch_size` or `gradient_accumulation_steps`"
)
# note that if mask_sum == 1, then there is a division by zero issue
# to avoid it you just need to use a larger minibatch_size
bessel_correction = mask_sum / (mask_sum - 1)
variance = variance * bessel_correction
return variance
def masked_whiten(values: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True) -> torch.Tensor:
"""Whiten values with masked values."""
mean, var = masked_mean(values, mask), masked_var(values, mask)
whitened = (values - mean) * torch.rsqrt(var + 1e-8)
if not shift_mean:
whitened += mean
return whitened
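# Numeric sketch for the masked statistics (illustrative): entries where the mask
# is zero are ignored, and `masked_var` applies a Bessel correction by default.
#
#     >>> values = torch.tensor([1.0, 2.0, 3.0, 100.0])
#     >>> mask = torch.tensor([1.0, 1.0, 1.0, 0.0])  # last entry is padding
#     >>> masked_mean(values, mask)
#     tensor(2.)
#     >>> masked_var(values, mask)
#     tensor(1.)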
def clip_by_value(x: torch.Tensor, tensor_min: float, tensor_max: float) -> torch.Tensor:
"""
Tensor extension to torch.clamp
https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
"""
clipped = torch.max(torch.min(x, tensor_max), tensor_min)
return clipped
def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor:
"""Calculate entropy from logits."""
pd = torch.nn.functional.softmax(logits, dim=-1)
entropy = torch.logsumexp(logits, axis=-1) - torch.sum(pd * logits, axis=-1)
return entropy
def average_torch_dicts(list_of_dicts: List[Dict]) -> Dict:
"""Average values of a list of dicts with torch tensors."""
average_dict = dict()
for key in list_of_dicts[0].keys():
average_dict[key] = torch.mean(torch.stack([d[key] for d in list_of_dicts]), axis=0)
return average_dict
def stats_to_np(stats_dict: Dict) -> Dict:
"""Cast all torch.tensors in dict to numpy arrays."""
new_dict = dict()
for k, v in stats_dict.items():
if isinstance(v, torch.Tensor):
new_dict[k] = v.detach().cpu()
if new_dict[k].dtype == torch.bfloat16:
new_dict[k] = new_dict[k].float()
new_dict[k] = new_dict[k].numpy()
else:
new_dict[k] = v
if np.isscalar(new_dict[k]):
new_dict[k] = float(new_dict[k])
return new_dict
def respond_to_batch(
model: nn.Module, queries: List[torch.LongTensor], txt_len: int = 20, top_k: int = 0, top_p: float = 1.0
) -> torch.LongTensor:
"""Sample text from language model."""
input_ids = queries
for i in range(txt_len):
# Get Logits
outputs = model(input_ids)
next_token_logits = outputs[0][:, -1, :]
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=-1)
return input_ids[:, -txt_len:]
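# Usage sketch for `respond_to_batch` (assumes the small "gpt2" checkpoint can be
# downloaded; the query text is arbitrary): sample a fixed number of continuation tokens.
#
#     >>> from transformers import AutoModelForCausalLM, AutoTokenizer
#     >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
#     >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     >>> query = tokenizer("The weather today is", return_tensors="pt").input_ids
#     >>> response = respond_to_batch(model, query, txt_len=10)
#     >>> response.shape[1]
#     10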
def set_seed(seed: int) -> None:
"""
Helper function for reproducible behavior to set the seed in `random`, `numpy`, and `torch`.
Args:
seed (`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if is_xpu_available():
torch.xpu.manual_seed_all(seed)
elif is_npu_available():
torch.npu.manual_seed_all(seed)
else:
torch.cuda.manual_seed_all(seed)
class LengthSampler:
"""
Samples a length
"""
def __init__(self, min_value: int, max_value: int):
self.values = list(range(min_value, max_value))
def __call__(self) -> int:
return np.random.choice(self.values)
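# Usage sketch for `LengthSampler` (illustrative): draws a random target length
# from the half-open range [min_value, max_value).
#
#     >>> sampler = LengthSampler(4, 16)
#     >>> 4 <= int(sampler()) < 16
#     True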
class PPODecorators(object):
optimize_device_cache = False
@classmethod
@contextmanager
def empty_device_cache(cls):
yield
if cls.optimize_device_cache:
if is_xpu_available():
gc.collect()
torch.xpu.empty_cache()
gc.collect()
elif is_npu_available():
gc.collect()
torch.npu.empty_cache()
gc.collect()
elif torch.cuda.is_available():
gc.collect()
torch.cuda.empty_cache()
gc.collect()
def randn_tensor(
shape: Union[Tuple, List],
generator: Optional[Union[List[torch.Generator], torch.Generator]] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
layout: Optional[torch.layout] = None,
) -> torch.Tensor:
"""A helper function to create random tensors on the desired `device` with the desired `dtype`. When
passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor
is always created on the CPU.
"""
# device on which tensor is created defaults to device
rand_device = device
batch_size = shape[0]
layout = layout or torch.strided
device = device or torch.device("cpu")
if generator is not None:
gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
if gen_device_type != device.type and gen_device_type == "cpu":
rand_device = "cpu"
if device != "mps":
warnings.warn(
f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
f" slighly speed up this function by passing a generator that was created on the {device} device."
)
elif gen_device_type != device.type and gen_device_type == "cuda":
raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")
# make sure generator list of length 1 is treated like a non-list
if isinstance(generator, list) and len(generator) == 1:
generator = generator[0]
if isinstance(generator, list):
shape = (1,) + shape[1:]
latents = [
torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
for i in range(batch_size)
]
latents = torch.cat(latents, dim=0).to(device)
else:
latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)
return latents
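# Usage sketch for `randn_tensor` (illustrative): a seeded CPU generator produces
# reproducible latents of the requested shape on the requested device (CPU here).
#
#     >>> gen = torch.Generator().manual_seed(0)
#     >>> latents = randn_tensor((2, 4, 8, 8), generator=gen, dtype=torch.float32)
#     >>> latents.shape, latents.device.type
#     (torch.Size([2, 4, 8, 8]), 'cpu')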
| 0 |
hf_public_repos/trl | hf_public_repos/trl/trl/import_utils.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
_is_python_greater_3_8 = sys.version_info >= (3, 8)
def is_peft_available() -> bool:
return importlib.util.find_spec("peft") is not None
def is_unsloth_available() -> bool:
return importlib.util.find_spec("unsloth") is not None
def is_accelerate_greater_20_0() -> bool:
if _is_python_greater_3_8:
from importlib.metadata import version
accelerate_version = version("accelerate")
else:
import pkg_resources
accelerate_version = pkg_resources.get_distribution("accelerate").version
return accelerate_version >= "0.20.0"
def is_transformers_greater_than(version: str) -> bool:
    from importlib.metadata import version as package_version
    _transformers_version = package_version("transformers")
    return _transformers_version > version
def is_torch_greater_2_0() -> bool:
if _is_python_greater_3_8:
from importlib.metadata import version
torch_version = version("torch")
else:
import pkg_resources
torch_version = pkg_resources.get_distribution("torch").version
return torch_version >= "2.0"
def is_diffusers_available() -> bool:
return importlib.util.find_spec("diffusers") is not None
def is_bitsandbytes_available() -> bool:
import torch
# bnb can be imported without GPU but is not usable.
return importlib.util.find_spec("bitsandbytes") is not None and torch.cuda.is_available()
def is_torchvision_available() -> bool:
return importlib.util.find_spec("torchvision") is not None
def is_rich_available() -> bool:
return importlib.util.find_spec("rich") is not None
def is_wandb_available() -> bool:
return importlib.util.find_spec("wandb") is not None
def is_xpu_available() -> bool:
if is_accelerate_greater_20_0():
import accelerate
return accelerate.utils.is_xpu_available()
else:
if importlib.util.find_spec("intel_extension_for_pytorch") is None:
return False
try:
import torch
return hasattr(torch, "xpu") and torch.xpu.is_available()
except RuntimeError:
return False
def is_npu_available() -> bool:
"""Checks if `torch_npu` is installed and potentially if a NPU is in the environment"""
if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
return False
import torch
import torch_npu # noqa: F401
return hasattr(torch, "npu") and torch.npu.is_available()
| 0 |
hf_public_repos/trl | hf_public_repos/trl/trl/__init__.py | # flake8: noqa
__version__ = "0.7.10.dev0"
from .core import set_seed
from .environment import TextEnvironment, TextHistory
from .extras import BestOfNSampler
from .import_utils import (
is_bitsandbytes_available,
is_diffusers_available,
is_npu_available,
is_peft_available,
is_wandb_available,
is_xpu_available,
)
from .models import (
AutoModelForCausalLMWithValueHead,
AutoModelForSeq2SeqLMWithValueHead,
PreTrainedModelWrapper,
create_reference_model,
)
from .trainer import (
DataCollatorForCompletionOnlyLM,
DPOTrainer,
IterativeSFTTrainer,
PPOConfig,
PPOTrainer,
RewardConfig,
RewardTrainer,
SFTTrainer,
)
if is_diffusers_available():
from .models import (
DDPOPipelineOutput,
DDPOSchedulerOutput,
DDPOStableDiffusionPipeline,
DefaultDDPOStableDiffusionPipeline,
)
from .trainer import DDPOConfig, DDPOTrainer
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/models/modeling_value_head.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM
from .modeling_base import PreTrainedModelWrapper
class ValueHead(nn.Module):
r"""
    The ValueHead class implements a head for language models that returns a scalar value for each output token.
"""
def __init__(self, config, **kwargs):
super().__init__()
if not hasattr(config, "summary_dropout_prob"):
summary_dropout_prob = kwargs.pop("summary_dropout_prob", 0.1)
else:
summary_dropout_prob = config.summary_dropout_prob
self.dropout = nn.Dropout(summary_dropout_prob) if summary_dropout_prob else nn.Identity()
# some models such as OPT have a projection layer before the word embeddings - e.g. OPT-350m
if hasattr(config, "hidden_size"):
hidden_size = config.hidden_size
if hasattr(config, "word_embed_proj_dim"):
hidden_size = config.word_embed_proj_dim
elif hasattr(config, "is_encoder_decoder"):
if config.is_encoder_decoder and hasattr(config, "decoder"):
if hasattr(config.decoder, "hidden_size"):
hidden_size = config.decoder.hidden_size
self.summary = nn.Linear(hidden_size, 1)
self.flatten = nn.Flatten()
def forward(self, hidden_states):
output = self.dropout(hidden_states)
# For now force upcast in fp32 if needed. Let's keep the
# output in fp32 for numerical stability.
if output.dtype != self.summary.weight.dtype:
output = output.to(self.summary.weight.dtype)
output = self.summary(output)
return output
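# Shape sketch for `ValueHead` (illustrative, assumes the "gpt2" config can be
# downloaded): hidden states of shape (batch, seq_len, hidden_size) are mapped to
# one scalar per token.
#
#     >>> from transformers import AutoConfig
#     >>> config = AutoConfig.from_pretrained("gpt2")
#     >>> head = ValueHead(config)
#     >>> head(torch.randn(2, 5, config.hidden_size)).shape
#     torch.Size([2, 5, 1])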
class AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):
r"""
An autoregressive model with a value head in addition to the language model head.
This class inherits from `~trl.PreTrainedModelWrapper` and wraps a
`transformers.PreTrainedModel` class. The wrapper class supports classic functions
such as `from_pretrained`, `push_to_hub` and `generate`. To call a method of the wrapped
model, simply manipulate the `pretrained_model` attribute of this class.
Class attributes:
- **transformers_parent_class** (`transformers.PreTrainedModel`) -- The parent class of the wrapped model. This
should be set to `transformers.AutoModelForCausalLM` for this class.
- **lm_head_namings** (`tuple`) -- A tuple of strings that are used to identify the language model head of the
wrapped model. This is set to `("lm_head", "embed_out")` for this class but can be changed for other models
in the future
- **supported_args** (`tuple`) -- A tuple of strings that are used to identify the arguments that are supported
by the `ValueHead` class. Currently, the supported args are:
- **summary_dropout_prob** (`float`, `optional`, defaults to `None`) -- The dropout probability for the
`ValueHead` class.
- **v_head_initializer_range** (`float`, `optional`, defaults to `0.2`) -- The initializer range for the
`ValueHead` if a specific initialization strategy is selected.
- **v_head_init_strategy** (`str`, `optional`, defaults to `None`) -- The initialization strategy for the
`ValueHead`. Currently, the supported strategies are:
- **`None`** -- Initializes the weights of the `ValueHead` with a random distribution. This is the default
strategy.
- **"normal"** -- Initializes the weights of the `ValueHead` with a normal distribution.
"""
transformers_parent_class = AutoModelForCausalLM
lm_head_namings = ["lm_head", "embed_out"]
supported_args = (
"summary_dropout_prob",
"v_head_initializer_range",
"v_head_init_strategy",
)
def __init__(self, pretrained_model, **kwargs):
r"""
Initializes the model.
Args:
pretrained_model (`transformers.PreTrainedModel`):
                The model to wrap. It should be a causal language model such as GPT2,
                or any model mapped inside the `AutoModelForCausalLM` class.
kwargs (`dict`, `optional`):
Additional keyword arguments, that are passed to the `ValueHead` class.
"""
super().__init__(pretrained_model, **kwargs)
v_head_kwargs, _, _ = self._split_kwargs(kwargs)
if not any(hasattr(self.pretrained_model, attribute) for attribute in self.lm_head_namings):
raise ValueError("The model does not have a language model head, please use a model that has one.")
self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)
self._init_weights(**v_head_kwargs)
def _init_weights(self, **kwargs):
r"""
Initializes the weights of the value head. The default initialization strategy is random.
Users can pass a different initialization strategy by passing the `v_head_init_strategy` argument
when calling `.from_pretrained`. Supported strategies are:
- `normal`: initializes the weights with a normal distribution.
Args:
**kwargs (`dict`, `optional`):
Additional keyword arguments, that are passed to the `ValueHead` class. These arguments
can contain the `v_head_init_strategy` argument as well as the `v_head_initializer_range`
argument.
"""
initializer_range = kwargs.pop("v_head_initializer_range", 0.2)
# random init by default
init_strategy = kwargs.pop("v_head_init_strategy", None)
if init_strategy is None:
# do nothing
pass
elif init_strategy == "normal":
self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range)
self.v_head.summary.bias.data.zero_()
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
**kwargs,
):
r"""
Applies a forward pass to the wrapped model and returns the logits of the value head.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
past_key_values (`tuple(tuple(torch.FloatTensor))`, `optional`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past_key_values` input) to speed up sequential decoding.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
kwargs (`dict`, `optional`):
Additional keyword arguments, that are passed to the wrapped model.
"""
kwargs["output_hidden_states"] = True # this had already been set in the LORA / PEFT examples
kwargs["past_key_values"] = past_key_values
if self.is_peft_model and self.pretrained_model.active_peft_config.peft_type == "PREFIX_TUNING":
kwargs.pop("past_key_values")
base_model_output = self.pretrained_model(
input_ids=input_ids,
attention_mask=attention_mask,
**kwargs,
)
last_hidden_state = base_model_output.hidden_states[-1]
lm_logits = base_model_output.logits
loss = base_model_output.loss
if last_hidden_state.device != self.v_head.summary.weight.device:
last_hidden_state = last_hidden_state.to(self.v_head.summary.weight.device)
value = self.v_head(last_hidden_state).squeeze(-1)
# force upcast in fp32 if logits are in half-precision
if lm_logits.dtype != torch.float32:
lm_logits = lm_logits.float()
return (lm_logits, loss, value)
def generate(self, *args, **kwargs):
r"""
A simple wrapper around the `generate` method of the wrapped model.
Please refer to the [`generate`](https://huggingface.co/docs/transformers/internal/generation_utils)
method of the wrapped model for more information about the supported arguments.
Args:
*args (`list`, *optional*):
Positional arguments passed to the `generate` method of the wrapped model.
**kwargs (`dict`, *optional*):
Keyword arguments passed to the `generate` method of the wrapped model.
"""
return self.pretrained_model.generate(*args, **kwargs)
def state_dict(self, *args, **kwargs):
r"""
Returns the state dictionary of the model. We add the state dictionary of the value head
to the state dictionary of the wrapped model by prepending the key with `v_head.`.
"""
if not self.is_peft_model:
pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs)
else:
# if it is a peft model, only save the v_head
pretrained_model_state_dict = {}
v_head_state_dict = self.v_head.state_dict(*args, **kwargs)
for k, v in v_head_state_dict.items():
pretrained_model_state_dict[f"v_head.{k}"] = v
return pretrained_model_state_dict
def push_to_hub(self, *args, **kwargs):
setattr(self.pretrained_model, "v_head", self.v_head)
return self.pretrained_model.push_to_hub(*args, **kwargs)
def post_init(self, state_dict):
r"""
We add the state dictionary of the value head to the state dictionary of the wrapped model
by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the
keys of the value head state dictionary.
"""
for k in list(state_dict.keys()):
if "v_head." in k:
state_dict[k.replace("v_head.", "")] = state_dict.pop(k)
self.v_head.load_state_dict(state_dict, strict=False)
del state_dict
if hasattr(self.pretrained_model, "hf_device_map"):
if (
"cpu" in self.pretrained_model.hf_device_map.values()
or "disk" in self.pretrained_model.hf_device_map.values()
):
raise ValueError(
"The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models."
)
first_device = list(set(self.pretrained_model.hf_device_map.values()))[0]
self.v_head = self.v_head.to(first_device)
def set_device_hook(module, input, outputs):
new_output = ()
for output in outputs:
if isinstance(output, torch.Tensor):
new_output += (output.to(first_device),)
else:
new_output += (output,)
return new_output
self.register_forward_hook(set_device_hook)
self.is_sequential_parallel = True
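# Usage sketch for `AutoModelForCausalLMWithValueHead` (assumes the "gpt2"
# checkpoint can be downloaded): the wrapper loads like any transformers model and
# its forward pass returns a (lm_logits, loss, value) tuple with one value per token.
#
#     >>> from transformers import AutoTokenizer
#     >>> model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
#     >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     >>> inputs = tokenizer("Hello world", return_tensors="pt")
#     >>> lm_logits, loss, value = model(**inputs)
#     >>> value.shape
#     torch.Size([1, 2])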
class AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper):
r"""
A seq2seq model with a value head in addition to the language model head.
This class inherits from `~trl.PreTrainedModelWrapper` and wraps a
`transformers.PreTrainedModel` class. The wrapper class supports classic functions
such as `from_pretrained` and `push_to_hub` and also provides some additional
functionalities such as `generate`.
Args:
pretrained_model (`transformers.PreTrainedModel`):
            The model to wrap. It should be a seq2seq language model such as T5,
            or any model mapped inside the `AutoModelForSeq2SeqLM` class.
kwargs:
Additional keyword arguments passed along to the `ValueHead` class.
"""
transformers_parent_class = AutoModelForSeq2SeqLM
lm_head_namings = ["lm_head", "embed_out", "output_projection"]
supported_args = (
"summary_dropout_prob",
"v_head_initializer_range",
"v_head_init_strategy",
)
def __init__(self, pretrained_model, **kwargs):
super().__init__(pretrained_model, **kwargs)
v_head_kwargs, _, _ = self._split_kwargs(kwargs)
self.is_encoder_decoder = True
if not self._has_lm_head():
raise ValueError("The model does not have a language model head, please use a model that has one.")
self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)
self._init_weights(**v_head_kwargs)
def _has_lm_head(self):
# check module names of all modules inside `pretrained_model` to find the language model head
for name, module in self.pretrained_model.named_modules():
if any(attribute in name for attribute in self.lm_head_namings):
return True
return False
def post_init(self, state_dict):
r"""
We add the state dictionary of the value head to the state dictionary of the wrapped model
by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the
keys of the value head state dictionary.
"""
for k in list(state_dict.keys()):
if "v_head." in k:
state_dict[k.replace("v_head.", "")] = state_dict.pop(k)
self.v_head.load_state_dict(state_dict, strict=False)
del state_dict
if hasattr(self.pretrained_model, "hf_device_map"):
if (
"cpu" in self.pretrained_model.hf_device_map.values()
or "disk" in self.pretrained_model.hf_device_map.values()
):
raise ValueError(
"The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models."
)
# get the lm_head device
for name, module in self.pretrained_model.named_modules():
if any(attribute in name for attribute in self.lm_head_namings):
lm_head_device = module.weight.device
break
# put v_head on the same device as the lm_head to avoid issues
self.v_head = self.v_head.to(lm_head_device)
def set_device_hook(module, input, outputs):
r"""
A hook that sets the device of the output of the model to the device of the first
parameter of the model.
Args:
module (`nn.Module`):
The module to which the hook is attached.
input (`tuple`):
The input to the module.
outputs (`tuple`):
The output of the module.
"""
new_output = ()
for output in outputs:
if isinstance(output, torch.Tensor):
new_output += (output.to(lm_head_device),)
else:
new_output += (output,)
return new_output
self.register_forward_hook(set_device_hook)
self.is_sequential_parallel = True
def state_dict(self, *args, **kwargs):
r"""
Returns the state dictionary of the model. We add the state dictionary of the value head
to the state dictionary of the wrapped model by prepending the key with `v_head.`.
"""
if not self.is_peft_model:
pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs)
else:
# if it is a peft model, only save the v_head
pretrained_model_state_dict = {}
v_head_state_dict = self.v_head.state_dict(*args, **kwargs)
for k, v in v_head_state_dict.items():
pretrained_model_state_dict[f"v_head.{k}"] = v
return pretrained_model_state_dict
def push_to_hub(self, *args, **kwargs):
setattr(self.pretrained_model, "v_head", self.v_head)
return self.pretrained_model.push_to_hub(*args, **kwargs)
def _init_weights(self, **kwargs):
r"""
We initialize the weights of the value head.
"""
initializer_range = kwargs.pop("v_head_initializer_range", 0.2)
# random init by default
init_strategy = kwargs.pop("v_head_init_strategy", None)
if init_strategy is None:
# do nothing
pass
elif init_strategy == "normal":
self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range)
self.v_head.summary.bias.data.zero_()
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
**kwargs,
):
kwargs["past_key_values"] = past_key_values
if self.is_peft_model and self.pretrained_model.active_peft_config.peft_type == "PREFIX_TUNING":
kwargs.pop("past_key_values")
base_model_output = self.pretrained_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_hidden_states=True, # We force the model to output hidden states
**kwargs,
)
last_hidden_state = base_model_output.decoder_hidden_states[-1]
lm_logits = base_model_output.logits
loss = base_model_output.loss
value = self.v_head(last_hidden_state).squeeze(-1)
# force upcast in fp32 if logits are in half-precision
if lm_logits.dtype != torch.float32:
lm_logits = lm_logits.float()
return (lm_logits, loss, value)
def generate(self, *args, **kwargs):
r"""
We call `generate` on the wrapped model.
"""
return self.pretrained_model.generate(*args, **kwargs)
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/models/__init__.py | # flake8: noqa
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .modeling_base import PreTrainedModelWrapper, create_reference_model
from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead
SUPPORTED_ARCHITECTURES = (
AutoModelForCausalLMWithValueHead,
AutoModelForSeq2SeqLMWithValueHead,
)
from ..import_utils import is_diffusers_available
if is_diffusers_available():
from .modeling_sd_base import (
DDPOPipelineOutput,
DDPOSchedulerOutput,
DDPOStableDiffusionPipeline,
DefaultDDPOStableDiffusionPipeline,
)
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/models/modeling_base.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from copy import deepcopy
import torch
import torch.nn as nn
from accelerate import PartialState
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import (
EntryNotFoundError,
HFValidationError,
LocalEntryNotFoundError,
RepositoryNotFoundError,
)
from safetensors.torch import load_file as safe_load_file
from transformers import PreTrainedModel
from ..import_utils import is_npu_available, is_peft_available, is_transformers_greater_than, is_xpu_available
if is_peft_available():
from peft import (
PeftConfig,
PeftModel,
PeftModelForCausalLM,
PeftModelForSeq2SeqLM,
PromptLearningConfig,
get_peft_model,
prepare_model_for_kbit_training,
)
if is_transformers_greater_than("4.33.0"):
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
else:
from transformers.deepspeed import is_deepspeed_zero3_enabled
LAYER_PATTERNS = [
"transformer.h.{layer}",
"model.decoder.layers.{layer}",
"gpt_neox.layers.{layer}",
"model.layers.{layer}",
]
class PreTrainedModelWrapper(nn.Module):
r"""
A wrapper class around a (`transformers.PreTrainedModel`) to be compatible with the
(`~transformers.PreTrained`) class in order to keep some attributes and methods of the
(`~transformers.PreTrainedModel`) class.
Attributes:
pretrained_model: (`transformers.PreTrainedModel`)
The model to be wrapped.
parent_class: (`transformers.PreTrainedModel`)
The parent class of the model to be wrapped.
supported_args: (`list`)
The list of arguments that are supported by the wrapper class.
"""
transformers_parent_class = None
supported_args = None
supported_modules = ("v_head",)
supported_rm_modules = ("score",)
supported_pretrained_model_architectures = (
(PreTrainedModel)
if not is_peft_available()
else (PreTrainedModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM)
)
def __init__(
self, pretrained_model=None, score_module=None, supports_rm_adapter=False, rm_adapter_name=None, **kwargs
):
super().__init__()
self.pretrained_model = pretrained_model
self.config = pretrained_model.config
self.prepare_inputs_for_generation = pretrained_model.prepare_inputs_for_generation
self.is_loaded_in_8bit = getattr(pretrained_model, "is_loaded_in_8bit", False)
self.is_loaded_in_4bit = getattr(pretrained_model, "is_loaded_in_4bit", False)
self.is_sequential_parallel = False
if hasattr(pretrained_model, "gradient_checkpointing_disable"):
self.gradient_checkpointing_disable = pretrained_model.gradient_checkpointing_disable
if hasattr(pretrained_model, "gradient_checkpointing_enable"):
self.gradient_checkpointing_enable = pretrained_model.gradient_checkpointing_enable
self.supports_rm_adapter = supports_rm_adapter
self.rm_adapter_name = rm_adapter_name
self.policy_adapter_name = "default"
if score_module is not None:
self.score = score_module
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiates a new model from a pretrained model from `transformers`. The
pretrained model is loaded using the `from_pretrained` method of the
`transformers.PreTrainedModel` class. The arguments that are specific to the
`transformers.PreTrainedModel` class are passed along this method and filtered
out from the `kwargs` argument.
Args:
pretrained_model_name_or_path (`str` or `transformers.PreTrainedModel`):
The path to the pretrained model or its name.
*model_args (`list`, *optional*)):
Additional positional arguments passed along to the underlying model's
`from_pretrained` method.
**kwargs (`dict`, *optional*):
Additional keyword arguments passed along to the underlying model's
`from_pretrained` method. We also pre-process the kwargs to extract
the arguments that are specific to the `transformers.PreTrainedModel`
class and the arguments that are specific to trl models. The kwargs
also support `prepare_model_for_kbit_training` arguments from
`peft` library.
"""
if kwargs is not None:
peft_config = kwargs.pop("peft_config", None)
reward_adapter = kwargs.pop("reward_adapter", None)
reward_adapter_name = kwargs.pop("reward_adapter_name", "reward_adapter")
is_trainable = kwargs.pop("is_trainable", False)
trl_model_args, pretrained_kwargs, peft_quantization_kwargs = cls._split_kwargs(kwargs)
token = pretrained_kwargs.get("token", None)
        else:
            peft_config = None
            reward_adapter = None
            reward_adapter_name = None
            is_trainable = False
            trl_model_args = {}
            pretrained_kwargs = {}
            peft_quantization_kwargs = {}
            token = None
if reward_adapter is not None and not isinstance(reward_adapter, str):
raise ValueError(
"The `reward_adapter` argument should be a string representing the name of local path or the Hub id to the Reward Modeling adapter."
)
is_peft_model = False
current_device = cls._get_current_device()
if isinstance(pretrained_model_name_or_path, str):
is_loaded_in_8bit = pretrained_kwargs["load_in_8bit"] if "load_in_8bit" in pretrained_kwargs else False
is_loaded_in_4bit = pretrained_kwargs["load_in_4bit"] if "load_in_4bit" in pretrained_kwargs else False
else:
is_loaded_in_8bit = getattr(pretrained_model_name_or_path, "is_loaded_in_8bit", False)
is_loaded_in_4bit = getattr(pretrained_model_name_or_path, "is_loaded_in_4bit", False)
if (is_loaded_in_8bit or is_loaded_in_4bit) and "device_map" not in pretrained_kwargs:
# warn users
logging.warning(
"The `device_map` argument is not provided. We will override the device_map argument."
" to set the entire"
" model on the current device. If you want to set the model on multiple devices, please provide"
" a custom `device_map` argument."
)
pretrained_kwargs["device_map"] = {"": current_device}
if is_peft_available() and peft_config is not None and not isinstance(peft_config, PeftConfig):
raise ValueError("The `peft_config` argument should be an instance of `peft.PeftConfig` class.")
# First, load the pre-trained model using the parent-class
# either `AutoModelForCausalLM` or `AutoModelForSeq2SeqLM`
if isinstance(pretrained_model_name_or_path, str):
if is_peft_available():
try:
# If there is a trained peft adapter in the hub, load its config.
remote_adapter_config = hf_hub_download(
pretrained_model_name_or_path,
"adapter_config.json",
token=token,
)
except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError):
remote_adapter_config = None
else:
remote_adapter_config = None
local_adapter_present = os.path.exists(os.path.join(pretrained_model_name_or_path, "adapter_config.json"))
if (local_adapter_present or remote_adapter_config is not None) and is_peft_available():
if peft_config is not None:
logging.warning(
"`peft_config` argument ignored since a peft config file was found in "
f"{pretrained_model_name_or_path}"
)
# Load the trained peft adapter config
if local_adapter_present:
trained_adapter_config = PeftConfig.from_pretrained(pretrained_model_name_or_path)
else:
remote_adapter_dir = os.path.dirname(remote_adapter_config)
trained_adapter_config = PeftConfig.from_pretrained(remote_adapter_dir)
# Load the pretrained base model
pretrained_model = cls.transformers_parent_class.from_pretrained(
trained_adapter_config.base_model_name_or_path, *model_args, **pretrained_kwargs
)
# Wrap the pretrained model with the trained peft adapter
pretrained_model = PeftModel.from_pretrained(
pretrained_model, pretrained_model_name_or_path, is_trainable=is_trainable
)
logging.info("Trained peft adapter loaded")
else:
pretrained_model = cls.transformers_parent_class.from_pretrained(
pretrained_model_name_or_path, *model_args, **pretrained_kwargs
)
if peft_config is not None:
# Initialize a new peft adapter with the given config
if is_loaded_in_8bit or is_loaded_in_4bit:
pretrained_model = prepare_model_for_kbit_training(
pretrained_model,
**peft_quantization_kwargs,
)
pretrained_model = get_peft_model(pretrained_model, peft_config)
logging.info("peft adapter initialised")
elif isinstance(pretrained_model_name_or_path, cls.supported_pretrained_model_architectures):
pretrained_model = pretrained_model_name_or_path
if peft_config is not None and isinstance(pretrained_model, PreTrainedModel):
# Initialize a new peft adapter with the given config
if is_loaded_in_8bit or is_loaded_in_4bit:
pretrained_model = prepare_model_for_kbit_training(
pretrained_model,
**peft_quantization_kwargs,
)
pretrained_model = get_peft_model(pretrained_model, peft_config)
logging.info("peft adapter initialised")
else:
raise ValueError(
"pretrained_model_name_or_path should be a string or a PreTrainedModel, "
f"but is {type(pretrained_model_name_or_path)}"
)
if is_peft_available():
if isinstance(pretrained_model, PeftModel):
is_peft_model = True
# for backward compatibility
if hasattr(pretrained_model, "active_peft_config") and isinstance(
pretrained_model.active_peft_config, PromptLearningConfig
):
raise ValueError("PromptLearningConfig is not supported for PPO training.")
# Add reward modeling adapter if specified
if not is_peft_model and reward_adapter is not None:
raise ValueError("reward_adapter can only be used with a PeftModel. ")
elif is_peft_model and reward_adapter is not None:
score_module = cls.add_and_load_reward_modeling_adapter(
pretrained_model, reward_adapter, reward_adapter_name, token=token
)
multi_adapter_args = {
"score_module": score_module,
"supports_rm_adapter": True,
"rm_adapter_name": reward_adapter_name,
}
else:
multi_adapter_args = {"supports_rm_adapter": False}
# Then, create the full model by instantiating the wrapper class
model = cls(pretrained_model, **multi_adapter_args, **trl_model_args)
# if resume_training, load the state_dict again - this is ok since the
# state_dict is removed from the model after loading it.
is_resuming_training = True
if isinstance(pretrained_model_name_or_path, str):
safe_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors")
filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin")
sharded_index_filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin.index.json")
safe_sharded_index_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors.index.json")
is_sharded = False
use_safe = os.path.exists(safe_filename)
if not (os.path.exists(filename) or os.path.exists(safe_filename)):
# Try with `pytorch_model.bin`
filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub(
pretrained_model,
pretrained_model_name_or_path,
sharded_index_filename,
token=token,
)
# Try with safetensors
if filename is None and files_to_download is None:
safe_filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub(
pretrained_model,
pretrained_model_name_or_path,
safe_sharded_index_filename,
token=token,
model_name="model.safetensors",
model_index_name="model.safetensors.index.json",
)
use_safe = True
else:
use_safe = False
loading_func = safe_load_file if use_safe else torch.load
load_kwargs = {} if use_safe else {"map_location": "cpu"}
if is_resuming_training:
if is_sharded:
# download each file and add it to the state_dict
state_dict = {}
for shard_file in files_to_download:
filename = hf_hub_download(
pretrained_model_name_or_path,
shard_file,
token=token,
)
state_dict.update(loading_func(filename, **load_kwargs))
else:
state_dict = loading_func(filename if not use_safe else safe_filename, **load_kwargs)
else:
state_dict = pretrained_model_name_or_path.state_dict()
model.is_peft_model = is_peft_model
model.current_device = current_device
if is_resuming_training:
model.post_init(state_dict=state_dict)
return model
@classmethod
def _get_checkpoint_from_hub(
cls,
pretrained_model,
pretrained_model_name_or_path,
index_filename,
token=None,
model_name="pytorch_model.bin",
model_index_name="pytorch_model.bin.index.json",
):
files_to_download = None
filename = None
is_resuming_training = True
is_sharded = False
try:
filename = hf_hub_download(
pretrained_model_name_or_path,
model_name,
token=token,
)
# sharded
except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError):
if os.path.exists(index_filename):
index_file_name = index_filename
else:
try:
index_file_name = hf_hub_download(
pretrained_model_name_or_path,
model_index_name,
token=token,
)
except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError):
# not continue training, do not have v_head weight
is_resuming_training = False
logging.warning(
f"A {type(pretrained_model)} model is loaded from '{pretrained_model_name_or_path}', "
f"and no v_head weight is found. This IS expected if you are not resuming PPO training."
)
# load json
if is_resuming_training:
with open(index_file_name, "r") as f:
index = json.load(f)
# check filename with `v_head` or any known extra module:
files_to_download = set()
for k, v in index["weight_map"].items():
if any([module in k for module in cls.supported_modules]):
files_to_download.add(v)
is_sharded = True
return filename, files_to_download, is_sharded, is_resuming_training
@classmethod
def _get_current_device(cls):
r"""
Get the current device. For GPU, we return the local process index using the `accelerate.PartialState`
object to handle corner cases when running scripts in distributed environments.
Returns:
current_device (`Union[int, str]`):
The current device.
"""
state = PartialState()
if is_xpu_available():
return f"xpu:{state.local_process_index}"
elif is_npu_available():
return f"npu:{state.local_process_index}"
else:
return state.local_process_index if torch.cuda.is_available() else "cpu"
@classmethod
def _split_kwargs(cls, kwargs):
"""
Separate the kwargs from the arguments that we support inside
`supported_args` and the ones that we don't.
"""
check_peft_kwargs = False
if is_peft_available():
from peft import prepare_model_for_kbit_training
check_peft_kwargs = True
supported_kwargs = {}
unsupported_kwargs = {}
peft_kwargs = {}
for key, value in kwargs.items():
if key in cls.supported_args:
supported_kwargs[key] = value
else:
unsupported_kwargs[key] = value
if check_peft_kwargs:
if key in prepare_model_for_kbit_training.__code__.co_varnames:
peft_kwargs[key] = value
if key in unsupported_kwargs:
unsupported_kwargs.pop(key)
return supported_kwargs, unsupported_kwargs, peft_kwargs
@classmethod
def add_and_load_reward_modeling_adapter(
cls, pretrained_model, adapter_model_id, adapter_name="reward_model_adapter", token=None
):
r"""
        Add and load a reward modeling adapter. This method can only be used if the
        model is a `PeftModel` and if you have initialized the model with the `reward_adapter`
        argument, pointing to the id of the reward modeling adapter. The latter also needs to contain the
        score head in order to produce the reward.
"""
pretrained_model.load_adapter(adapter_model_id, adapter_name, is_trainable=False)
pretrained_model.train()
filename = os.path.join(adapter_model_id, "adapter_model.bin")
safe_loading = False
if not os.path.exists(filename):
try:
local_filename = hf_hub_download(
adapter_model_id,
"adapter_model.bin",
token=token,
)
except: # noqa
filename = os.path.join(adapter_model_id, "adapter_model.safetensors")
safe_loading = True
if not os.path.exists(filename):
try:
local_filename = hf_hub_download(
adapter_model_id,
"adapter_model.safetensors",
token=token,
)
except: # noqa
raise ValueError(
"Could not find adapter model in the Hub, make sure you have the correct adapter model id."
)
else:
local_filename = filename
else:
local_filename = filename
loading_func = safe_load_file if safe_loading else torch.load
load_kwargs = {} if safe_loading else {"map_location": "cpu"}
adapter_state_dict = loading_func(local_filename, **load_kwargs)
for score_name_candidate in cls.supported_rm_modules:
if any([score_name_candidate in name for name in adapter_state_dict.keys()]):
score_name = score_name_candidate
# we have found the correct head name and can break
break
score_dict = {}
for name, param in adapter_state_dict.items():
if score_name in name:
key_name = ".".join(name.split(".")[-1:])
score_dict[key_name] = param.to(cls._get_current_device())
num_labels, hidden_dim = score_dict["weight"].shape
has_bias = any(["bias" in name for name in adapter_state_dict.keys()])
score = nn.Linear(hidden_dim, num_labels, bias=has_bias).to(
device=cls._get_current_device(),
dtype=pretrained_model.dtype,
)
score.load_state_dict(score_dict)
for param in score.parameters():
param.requires_grad = False
return score
def push_to_hub(self, *args, **kwargs):
r"""
Push the pretrained model to the hub. This method is a wrapper around
`transformers.PreTrainedModel.push_to_hub`. Please refer to the documentation
of `transformers.PreTrainedModel.push_to_hub` for more information.
Args:
*args (`list`, *optional*):
Positional arguments passed along to the underlying model's
`push_to_hub` method.
**kwargs (`dict`, *optional*):
Keyword arguments passed along to the underlying model's
`push_to_hub` method.
"""
raise NotImplementedError
def save_pretrained(self, *args, **kwargs):
r"""
Save the pretrained model to a directory. This method is a wrapper around
`transformers.PreTrainedModel.save_pretrained`. Please refer to the documentation
of `transformers.PreTrainedModel.save_pretrained` for more information.
Args:
*args (`list`, *optional*):
Positional arguments passed along to the underlying model's
`save_pretrained` method.
**kwargs (`dict`, *optional*):
Keyword arguments passed along to the underlying model's
`save_pretrained` method.
"""
state_dict = kwargs.get("state_dict")
if state_dict is None:
state_dict = self.state_dict()
kwargs["state_dict"] = state_dict
# if it is a peft model only save the `v_head` state_dict and
        # pop the `state_dict` from the kwargs to avoid silent bugs with `peft`
if self.is_peft_model:
save_path = args[0]
save_path = os.path.join(save_path, "pytorch_model.bin")
torch.save(state_dict, save_path)
_ = kwargs.pop("state_dict", None)
return self.pretrained_model.save_pretrained(*args, **kwargs)
def state_dict(self, *args, **kwargs):
r"""
Return the state_dict of the pretrained model.
"""
raise NotImplementedError
def post_init(self, *args, **kwargs):
r"""
Post initialization method. This method is called after the model is
instantiated and loaded from a checkpoint. It can be used to perform
additional operations such as loading the state_dict.
"""
raise NotImplementedError
def compute_reward_score(self, input_ids, attention_mask=None, **kwargs):
r"""
Computes the reward score for a given input. The method has first to enable the adapter
and then compute the reward score. After that the model disables the reward modeling
adapter and enables the default ppo adapter again.
"""
if not self.supports_rm_adapter:
raise ValueError("This model does not support reward modeling adapter.")
# enable rm adapter
self.pretrained_model.set_adapter(self.rm_adapter_name)
self.pretrained_model.eval()
with torch.no_grad():
base_model_output = self.pretrained_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_hidden_states=True,
return_dict=True,
**kwargs,
)
last_hidden_states = base_model_output.hidden_states[-1]
scores = self.score(last_hidden_states)
self.pretrained_model.set_adapter(self.policy_adapter_name)
self.pretrained_model.eval()
return scores
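# Usage sketch for `compute_reward_score` (hedged; the adapter id below is
# hypothetical): a PEFT policy can be loaded together with a reward-modeling
# adapter, for example
#
#     model = AutoModelForCausalLMWithValueHead.from_pretrained(
#         "gpt2",
#         peft_config=lora_config,                 # a `peft.LoraConfig`
#         reward_adapter="org/reward-adapter-id",  # hypothetical Hub id
#     )
#     scores = model.compute_reward_score(input_ids, attention_mask)
#
# `compute_reward_score` activates the reward adapter, scores the batch with the
# loaded score head, and switches back to the policy adapter before returning.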
def create_reference_model(
model: PreTrainedModelWrapper, num_shared_layers: int = None, pattern: str = None
) -> PreTrainedModelWrapper:
"""
    Creates a static reference copy of a model. Note that the model will be in `.eval()` mode.
Args:
model (`PreTrainedModelWrapper`): The model to be copied.
num_shared_layers (`int`, *optional*): The number of initial layers that are shared between both models and kept frozen.
pattern (`str`, *optional*): The shared layers are selected with a string pattern
(e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here.
Returns
`PreTrainedModelWrapper`
"""
if is_deepspeed_zero3_enabled():
raise ValueError(
"DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoCausalLM.from_pretrained()`."
)
parameter_names = [n for n, _ in model.named_parameters()]
ref_model = deepcopy(model)
# if no layers are shared, return copy of model
if num_shared_layers is None:
for param_name in parameter_names:
param = ref_model.get_parameter(param_name)
param.requires_grad = False
return ref_model.eval()
# identify layer name pattern
if pattern is not None:
pattern = pattern.format(layer=num_shared_layers)
else:
for pattern_candidate in LAYER_PATTERNS:
pattern_candidate = pattern_candidate.format(layer=num_shared_layers)
if any([pattern_candidate in name for name in parameter_names]):
pattern = pattern_candidate
break
if pattern is None:
raise ValueError("Layer pattern could not be matched.")
# divide parameters in shared and unshared parameter lists
shared_param_list = []
unshared_param_list = []
shared_parameter = True
for name, param in model.named_parameters():
if pattern in name:
shared_parameter = False
if shared_parameter:
shared_param_list.append(name)
else:
unshared_param_list.append(name)
# create reference of the original parameter if they are shared
for param_name in shared_param_list:
param = model.get_parameter(param_name)
param.requires_grad = False
ref_param = ref_model.get_parameter(param_name) # noqa
ref_param = param # noqa
# for all other parameters just make sure they don't use gradients
for param_name in unshared_param_list:
param = ref_model.get_parameter(param_name)
param.requires_grad = False
if pattern is not None and len(unshared_param_list) == 0:
logging.warning("Pattern passed or found, but no layers matched in the model. Check for a typo.")
return ref_model.eval()
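# Usage sketch for `create_reference_model` (illustrative; "gpt2" is an arbitrary
# checkpoint): create a frozen copy of the policy to use as the PPO reference model;
# `num_shared_layers` can optionally keep the first N blocks shared between the two.
#
#     >>> from trl import AutoModelForCausalLMWithValueHead, create_reference_model
#     >>> model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
#     >>> ref_model = create_reference_model(model)
#     >>> all(not p.requires_grad for p in ref_model.parameters())
#     True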
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/models/modeling_sd_base.py | # Copyright 2023 DDPO-pytorch authors (Kevin Black), The HuggingFace Team, metric-space. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import torch
from diffusers import DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
from diffusers.utils import convert_state_dict_to_diffusers
from ..core import randn_tensor
from ..import_utils import is_peft_available
if is_peft_available():
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
@dataclass
class DDPOPipelineOutput(object):
"""
Output class for the diffusers pipeline to be finetuned with the DDPO trainer
Args:
images (`torch.Tensor`):
The generated images.
latents (`List[torch.Tensor]`):
The latents used to generate the images.
log_probs (`List[torch.Tensor]`):
The log probabilities of the latents.
"""
images: torch.Tensor
latents: torch.Tensor
log_probs: torch.Tensor
@dataclass
class DDPOSchedulerOutput(object):
"""
Output class for the diffusers scheduler to be finetuned with the DDPO trainer
Args:
latents (`torch.Tensor`):
Predicted sample at the previous timestep. Shape: `(batch_size, num_channels, height, width)`
log_probs (`torch.Tensor`):
Log probability of the above mentioned sample. Shape: `(batch_size)`
"""
latents: torch.Tensor
log_probs: torch.Tensor
class DDPOStableDiffusionPipeline(object):
"""
Main class for the diffusers pipeline to be finetuned with the DDPO trainer
"""
def __call__(self, *args, **kwargs) -> DDPOPipelineOutput:
raise NotImplementedError
def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput:
raise NotImplementedError
@property
def unet(self):
"""
Returns the 2d U-Net model used for diffusion.
"""
raise NotImplementedError
@property
def vae(self):
"""
Returns the Variational Autoencoder model used from mapping images to and from the latent space
"""
raise NotImplementedError
@property
def tokenizer(self):
"""
Returns the tokenizer used for tokenizing text inputs
"""
raise NotImplementedError
@property
def scheduler(self):
"""
Returns the scheduler associated with the pipeline used for the diffusion process
"""
raise NotImplementedError
@property
def text_encoder(self):
"""
Returns the text encoder used for encoding text inputs
"""
raise NotImplementedError
@property
def autocast(self):
"""
Returns the autocast context manager
"""
raise NotImplementedError
def set_progress_bar_config(self, *args, **kwargs):
"""
Sets the progress bar config for the pipeline
"""
raise NotImplementedError
def save_pretrained(self, *args, **kwargs):
"""
Saves all of the model weights
"""
raise NotImplementedError
def get_trainable_layers(self, *args, **kwargs):
"""
Returns the trainable parameters of the pipeline
"""
raise NotImplementedError
def save_checkpoint(self, *args, **kwargs):
"""
Light wrapper around accelerate's register_save_state_pre_hook which is run before saving state
"""
raise NotImplementedError
def load_checkpoint(self, *args, **kwargs):
"""
        Light wrapper around accelerate's register_load_state_pre_hook which is run before loading state
"""
raise NotImplementedError
def _left_broadcast(input_tensor, shape):
"""
As opposed to the default direction of broadcasting (right to left), this function broadcasts
from left to right
Args:
input_tensor (`torch.FloatTensor`): is the tensor to broadcast
shape (`Tuple[int]`): is the shape to broadcast to
"""
input_ndim = input_tensor.ndim
if input_ndim > len(shape):
raise ValueError(
"The number of dimensions of the tensor to broadcast cannot be greater than the length of the shape to broadcast to"
)
return input_tensor.reshape(input_tensor.shape + (1,) * (len(shape) - input_ndim)).broadcast_to(shape)
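# Shape sketch for `_left_broadcast` (illustrative): a per-sample scalar of shape
# (2,) receives trailing singleton dimensions before being broadcast against a
# latent of shape (2, 4, 8, 8).
#
#     >>> t = torch.tensor([0.1, 0.2])
#     >>> _left_broadcast(t, (2, 4, 8, 8)).shape
#     torch.Size([2, 4, 8, 8])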
def _get_variance(self, timestep, prev_timestep):
alpha_prod_t = torch.gather(self.alphas_cumprod, 0, timestep.cpu()).to(timestep.device)
alpha_prod_t_prev = torch.where(
prev_timestep.cpu() >= 0,
self.alphas_cumprod.gather(0, prev_timestep.cpu()),
self.final_alpha_cumprod,
).to(timestep.device)
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
return variance
def scheduler_step(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
eta: float = 0.0,
use_clipped_model_output: bool = False,
generator=None,
prev_sample: Optional[torch.FloatTensor] = None,
) -> DDPOSchedulerOutput:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
eta (`float`): weight of noise for added noise in diffusion step.
use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped
predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when
`self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would
coincide with the one provided as input and `use_clipped_model_output` will have no effect.
generator: random number generator.
prev_sample (`torch.FloatTensor`, *optional*): the previous sample itself. If provided, no new noise is
sampled and the function only computes the log probability of this sample under the predicted
distribution; it cannot be passed together with `generator`.
Returns:
`DDPOSchedulerOutput`: the predicted sample at the previous timestep and the log probability of the sample
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read the DDIM paper in detail to understand the notation below
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
# to prevent OOB on gather
prev_timestep = torch.clamp(prev_timestep, 0, self.config.num_train_timesteps - 1)
# 2. compute alphas, betas
alpha_prod_t = self.alphas_cumprod.gather(0, timestep.cpu())
alpha_prod_t_prev = torch.where(
prev_timestep.cpu() >= 0,
self.alphas_cumprod.gather(0, prev_timestep.cpu()),
self.final_alpha_cumprod,
)
alpha_prod_t = _left_broadcast(alpha_prod_t, sample.shape).to(sample.device)
alpha_prod_t_prev = _left_broadcast(alpha_prod_t_prev, sample.shape).to(sample.device)
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
pred_epsilon = model_output
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
" `v_prediction`"
)
# 4. Clip or threshold "predicted x_0"
if self.config.thresholding:
pred_original_sample = self._threshold_sample(pred_original_sample)
elif self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range
)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
variance = _get_variance(self, timestep, prev_timestep)
std_dev_t = eta * variance ** (0.5)
std_dev_t = _left_broadcast(std_dev_t, sample.shape).to(sample.device)
if use_clipped_model_output:
# the pred_epsilon is always re-derived from the clipped x_0 in Glide
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
prev_sample_mean = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
if prev_sample is not None and generator is not None:
raise ValueError(
"Cannot pass both generator and prev_sample. Please make sure that either `generator` or"
" `prev_sample` stays `None`."
)
if prev_sample is None:
variance_noise = randn_tensor(
model_output.shape,
generator=generator,
device=model_output.device,
dtype=model_output.dtype,
)
prev_sample = prev_sample_mean + std_dev_t * variance_noise
# log prob of prev_sample given prev_sample_mean and std_dev_t
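# i.e. the Gaussian log-density log N(x; mu, sigma) = -(x - mu)^2 / (2 * sigma^2) - log(sigma) - log(sqrt(2 * pi))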
log_prob = (
-((prev_sample.detach() - prev_sample_mean) ** 2) / (2 * (std_dev_t**2))
- torch.log(std_dev_t)
- torch.log(torch.sqrt(2 * torch.as_tensor(np.pi)))
)
# mean along all but batch dimension
log_prob = log_prob.mean(dim=tuple(range(1, log_prob.ndim)))
return DDPOSchedulerOutput(prev_sample.type(sample.dtype), log_prob)
# 1. The output type of the call is different as the logprobs are now returned
# 2. An extra method called `scheduler_step` is added which is used to constrain the scheduler output
@torch.no_grad()
def pipeline_step(
self,
prompt: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
Examples:
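A minimal sketch (not taken from the library documentation; the checkpoint name, prompt and
number of inference steps below are placeholders):
```python
from trl import DefaultDDPOStableDiffusionPipeline
pipeline = DefaultDDPOStableDiffusionPipeline("runwayml/stable-diffusion-v1-5")
output = pipeline("a photo of a cute llama", num_inference_steps=20)
# `output` bundles the generated images, all intermediate latents and the per-step log probabilities
```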
Returns:
`DDPOPipelineOutput`: The generated image, the predicted latents used to generate the image and the associated log probabilities
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
all_latents = [latents]
all_log_probs = []
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
scheduler_output = scheduler_step(self.scheduler, noise_pred, t, latents, eta)
latents = scheduler_output.latents
log_prob = scheduler_output.log_probs
all_latents.append(latents)
all_log_probs.append(log_prob)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
return DDPOPipelineOutput(image, all_latents, all_log_probs)
class DefaultDDPOStableDiffusionPipeline(DDPOStableDiffusionPipeline):
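"""
Default implementation of `DDPOStableDiffusionPipeline`, backed by a `diffusers` `StableDiffusionPipeline`
with a DDIM scheduler and the safety checker disabled.
Args:
pretrained_model_name (`str`): name or path of the pretrained Stable Diffusion model.
pretrained_model_revision (`str`, defaults to `"main"`): revision of the pretrained model to load.
use_lora (`bool`, defaults to `True`): whether to train only LoRA layers added to the U-Net attention blocks
instead of the full U-Net.
"""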
def __init__(self, pretrained_model_name: str, *, pretrained_model_revision: str = "main", use_lora: bool = True):
self.sd_pipeline = StableDiffusionPipeline.from_pretrained(
pretrained_model_name, revision=pretrained_model_revision
)
self.use_lora = use_lora
self.pretrained_model = pretrained_model_name
self.pretrained_revision = pretrained_model_revision
try:
self.sd_pipeline.load_lora_weights(
pretrained_model_name,
weight_name="pytorch_lora_weights.safetensors",
revision=pretrained_model_revision,
)
self.use_lora = True
except OSError:
if use_lora:
warnings.warn(
"If you are aware that the pretrained model has no lora weights to it, ignore this message. "
"Otherwise please check the if `pytorch_lora_weights.safetensors` exists in the model folder."
)
self.sd_pipeline.scheduler = DDIMScheduler.from_config(self.sd_pipeline.scheduler.config)
self.sd_pipeline.safety_checker = None
# memory optimization
self.sd_pipeline.vae.requires_grad_(False)
self.sd_pipeline.text_encoder.requires_grad_(False)
self.sd_pipeline.unet.requires_grad_(not self.use_lora)
def __call__(self, *args, **kwargs) -> DDPOPipelineOutput:
return pipeline_step(self.sd_pipeline, *args, **kwargs)
def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput:
return scheduler_step(self.sd_pipeline.scheduler, *args, **kwargs)
@property
def unet(self):
return self.sd_pipeline.unet
@property
def vae(self):
return self.sd_pipeline.vae
@property
def tokenizer(self):
return self.sd_pipeline.tokenizer
@property
def scheduler(self):
return self.sd_pipeline.scheduler
@property
def text_encoder(self):
return self.sd_pipeline.text_encoder
@property
def autocast(self):
return contextlib.nullcontext if self.use_lora else None
def save_pretrained(self, output_dir):
if self.use_lora:
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(self.sd_pipeline.unet))
self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict)
self.sd_pipeline.save_pretrained(output_dir)
def set_progress_bar_config(self, *args, **kwargs):
self.sd_pipeline.set_progress_bar_config(*args, **kwargs)
def get_trainable_layers(self):
if self.use_lora:
lora_config = LoraConfig(
r=4,
lora_alpha=4,
init_lora_weights="gaussian",
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
self.sd_pipeline.unet.add_adapter(lora_config)
# To avoid accelerate unscaling problems in FP16.
for param in self.sd_pipeline.unet.parameters():
# only upcast trainable parameters (LoRA) into fp32
if param.requires_grad:
param.data = param.to(torch.float32)
return self.sd_pipeline.unet
else:
return self.sd_pipeline.unet
def save_checkpoint(self, models, weights, output_dir):
if len(models) != 1:
raise ValueError("Given how the trainable params were set, this should be of length 1")
if self.use_lora and hasattr(models[0], "peft_config") and getattr(models[0], "peft_config", None) is not None:
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(models[0]))
self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict)
elif not self.use_lora and isinstance(models[0], UNet2DConditionModel):
models[0].save_pretrained(os.path.join(output_dir, "unet"))
else:
raise ValueError(f"Unknown model type {type(models[0])}")
def load_checkpoint(self, models, input_dir):
if len(models) != 1:
raise ValueError("Given how the trainable params were set, this should be of length 1")
if self.use_lora:
lora_state_dict, network_alphas = self.sd_pipeline.lora_state_dict(
input_dir, weight_name="pytorch_lora_weights.safetensors"
)
self.sd_pipeline.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=models[0])
elif not self.use_lora and isinstance(models[0], UNet2DConditionModel):
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
models[0].register_to_config(**load_model.config)
models[0].load_state_dict(load_model.state_dict())
del load_model
else:
raise ValueError(f"Unknown model type {type(models[0])}")
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/iterative_sft_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
DataCollator,
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
PreTrainedModel,
PreTrainedTokenizerBase,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import EvalLoopOutput
from ..core import PPODecorators
from ..import_utils import is_peft_available
if is_peft_available():
from peft import PeftModel
class IterativeSFTTrainer(Trainer):
"""
The IterativeSFTTrainer can be used to finetune models with methods that require some intermediate steps between optimization steps.
Attributes:
**model** (`PreTrainedModel`) -- Model to be optimized, either an 'AutoModelForCausalLM' or an 'AutoModelForSeq2SeqLM'.
Check the documentation of `PreTrainedModel` for more details.
**args** (`transformers.TrainingArguments`): -- The arguments to use for training.
**tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the
data. Check the documentation of `transformers.PreTrainedTokenizer` and
`transformers.PreTrainedTokenizerFast` for more details.
**optimizers** (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): -- The optimizer and scheduler to use for training.
**data_collator** (Union[DataCollatorForLanguageModeling, DataCollatorForSeq2Seq], *optional*) -- Data collator to be used for training and
passed along the dataloader.
**eval_dataset** (`datasets.Dataset`): The dataset to use for evaluation.
**max_length** (`int`, defaults to `None`): -- The maximum length of the input.
**truncation_mode** (`str`, defaults to `keep_end`): -- The truncation mode to use, either `keep_end` or `keep_start`.
**preprocess_logits_for_metrics** (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): -- The function to use to preprocess the logits before computing the metrics.
**compute_metrics** (`Callable[[EvalPrediction], Dict]`, *optional*): -- The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to metric values.
**optimize_device_cache** (`bool`, *optional*, defaults to `False`) -- Optimize CUDA cache for slightly more memory-efficient training.
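Example (a minimal sketch, not taken from the library documentation; the model name, data and
hyper-parameters below are placeholders):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import IterativeSFTTrainer
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
args = TrainingArguments(output_dir="iterative_sft", max_steps=10, per_device_train_batch_size=2)
trainer = IterativeSFTTrainer(model=model, args=args, tokenizer=tokenizer)
# each call runs one optimisation pass over the freshly provided texts
trainer.step(texts=["My first sentence.", "My second sentence."])
```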
"""
def __init__(
self,
model: PreTrainedModel = None,
args: TrainingArguments = None,
tokenizer: PreTrainedTokenizerBase = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (
None,
None,
),
data_collator: Optional[DataCollator] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
max_length: Optional[int] = None,
truncation_mode: Optional[str] = "keep_end",
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None,
optimize_device_cache: Optional[bool] = False,
):
# Step 0: check positional arguments validity
if not isinstance(tokenizer, (PreTrainedTokenizerBase)):
raise ValueError(
f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}"
)
if not isinstance(model, PreTrainedModel):
raise ValueError(f"model must be a PreTrainedModel, got {type(model)}")
if not model.can_generate():
warnings.warn(
f"The current model class {type(model)} is not compatible with `.generate()`"
"Please make sure that this is intended."
)
if optimizers[1] is None and args.max_steps == -1:
raise ValueError(
"When no scheduler is provided, you need to set the total number of training steps to perform `max_steps`"
)
self.is_encoder_decoder = getattr(model.config, "is_encoder_decoder", False)
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
self.tokenizer = tokenizer
if data_collator is None:
if self.is_encoder_decoder:
warnings.warn(
"No data collator is provided. Using 'DataCollatorForSeq2Seq' with"
"'labels_pad_token_id' set to '-100' and 'pad_to_multiple_of' set to 8."
)
self.data_collator = DataCollatorForSeq2Seq(tokenizer, label_pad_token_id=-100, pad_to_multiple_of=8)
else:
warnings.warn("No data collator is provided. Using 'DataCollatorForLanguageModeling'")
self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
else:
self.data_collator = data_collator
self.max_length = max_length
self.truncation_mode = truncation_mode
self.optimize_device_cache = optimize_device_cache
super().__init__(
model=model,
args=args,
data_collator=self.data_collator,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
self.create_optimizer_and_scheduler(self.args.max_steps)
# prepare model, optimizer and lr_scheduler
self.model, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
self.model, self.optimizer, self.lr_scheduler
)
self.tokenizer.truncation_side = "left" if self.truncation_mode == "keep_end" else "right"
if not hasattr(self, "accelerator"):
raise AttributeError(
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
)
PPODecorators.optimize_device_cache = self.optimize_device_cache
def prepare_model_inputs(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, labels: torch.Tensor):
if attention_mask is None:
attention_mask = [torch.ones_like(ids) for ids in input_ids]
if self.is_encoder_decoder:
input_data = self.data_collator(
[
{"input_ids": ids, "attention_mask": att, "labels": lab}
for ids, att, lab in zip(input_ids, attention_mask, labels)
]
).to(self.model.device)
input_data.pop("decoder_input_ids", None) # This is directly computed inside the model
input_data["labels"][input_data["labels"] == self.tokenizer.pad_token_id] = -100
else:
input_data = self.data_collator(
[{"input_ids": ids, "attention_mask": att} for ids, att in zip(input_ids, attention_mask)]
).to(self.model.device)
# truncate in case the user has provided input_ids, attention_mask and labels
if self.max_length is not None:
if self.truncation_mode == "keep_start":
input_data = {k: v[:, : self.max_length] for k, v in input_data.items()}
elif self.truncation_mode == "keep_end":
input_data = {k: v[:, -self.max_length :] for k, v in input_data.items()}
else:
raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
return input_data
@staticmethod
def _step_safety_checker(
input_ids: List[torch.LongTensor],
attention_mask: List[torch.LongTensor],
labels: List[torch.LongTensor],
texts: List[str],
texts_labels: List[str],
):
"""
Check if the input data is valid for training.
Args:
input_ids (List[`torch.LongTensor`]):
List of tensors containing the input_ids
attention_mask (List[`torch.LongTensor`]):
List of tensors containing the attention_mask
labels (List[`torch.LongTensor`]):
List of tensors containing the labels
texts (List[`str`]):
List of strings containing the text input.
texts_labels (List[`str`]):
List of strings containing the text labels.
Returns:
`tuple`: The input data.
"""
if texts is None:
if attention_mask is None:
for name, tensor_list in zip(["input_ids", "labels"], [input_ids, labels]):
if not isinstance(tensor_list, list):
raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
if not isinstance(tensor_list[0], torch.Tensor):
raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
else:
for name, tensor_list in zip(
["input_ids", "attention_mask", "labels"], [input_ids, attention_mask, labels]
):
if not isinstance(tensor_list, list):
raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
if not isinstance(tensor_list[0], torch.Tensor):
raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
else:
if not isinstance(texts, list):
raise ValueError(f"'text' must be a list of strings - got {type(texts)}")
if not isinstance(texts[0], str):
raise ValueError(f"Elements in 'text' must be strings - got {type(texts[0])}")
if texts_labels is not None:
if not isinstance(texts_labels, list):
raise ValueError(f"'text_labels' must be a list of strings - got {type(texts_labels)}")
if not isinstance(texts_labels[0], str):
raise ValueError(f"Elements in 'text_labels' must be strings - got {type(texts_labels[0])}")
return input_ids, attention_mask, labels, texts, texts_labels
@PPODecorators.empty_device_cache()
def step(
self,
input_ids: Optional[List[torch.LongTensor]] = None,
attention_mask: Optional[List[torch.LongTensor]] = None,
labels: Optional[List[torch.LongTensor]] = None,
texts: Optional[List[str]] = None,
texts_labels: Optional[List[str]] = None,
):
"""
Run an optimisation step given a list of input_ids, attention_mask, and labels or a list of text and text_labels.
Args:
input_ids (List[`torch.LongTensor`]):
List of tensors containing the input_ids (if not provided, text will be used)
attention_mask (List[`torch.LongTensor`], *optional*):
List of tensors containing the attention_mask
labels (List[`torch.LongTensor`], *optional*):
List of tensors containing the labels (if set to None, will default to input_ids)
texts (List[`str`], *optional*):
List of strings containing the text input (if not provided, input_ids will directly be used)
texts_labels (List[`str`], *optional*):
List of strings containing the text labels (if set to None, will default to text)
Returns:
`dict[str, Any]`: A summary of the training statistics
"""
self.model.train()
if self.state.global_step == 0:
self.tr_loss = torch.tensor(0.0).to(self.args.device)
self._globalstep_last_logged = self.state.global_step
if input_ids is None and texts is None:
raise ValueError("Step should include `input_ids` or `texts` as keyword arguments.")
elif input_ids is not None and texts is not None:
warnings.warn(
"Both 'input_ids' and 'texts' are provided. 'input_ids' will be overwritten using inputs provided by the 'texts' keyword argument."
)
if labels is None and texts_labels is None and self.is_encoder_decoder:
raise ValueError(
"No 'labels' or 'text_labels' are provided. When using an encoder-decoder architecture, 'labels' or 'text_labels' must be passed."
)
input_ids, attention_mask, labels, texts, texts_labels = self._step_safety_checker(
input_ids, attention_mask, labels, texts, texts_labels
)
if texts is not None:
model_inputs = self.tokenizer(
texts, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt"
)
input_ids, attention_mask = model_inputs["input_ids"], model_inputs["attention_mask"]
if texts_labels is not None:
labels = self.tokenizer(
texts_labels, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt"
)["input_ids"]
if labels is None:
warnings.warn("No labels are provided. Setting labels to input_ids")
labels = input_ids
model_inputs = self.prepare_model_inputs(input_ids, attention_mask, labels)
model_inputs_names = list(model_inputs.keys())
batch_dict = {}
batch_dict.update(model_inputs)
def collator(data):
return_dict = dict()
for key in data[0]:
if key in ["input_ids", "attention_mask", "labels"]:
return_dict[key] = torch.stack([d[key] for d in data]).to(self.model.device)
return return_dict
batch_data = Dataset.from_dict(batch_dict)
batch_data.set_format("torch")
step_dataloader = DataLoader(
batch_data,
batch_size=self.args.per_device_train_batch_size,
shuffle=True,
collate_fn=collator,
)
for _, batch in enumerate(step_dataloader):
with self.accelerator.accumulate(self.model):
model_inputs = {k: batch[k] for k in model_inputs_names}
loss = self.compute_loss(self.model, model_inputs)
if self.args.n_gpu > 1:
loss = loss.mean()
tr_loss_step = loss.detach()
self.accelerator.backward(loss)
if self.accelerator.sync_gradients and self.args.max_grad_norm is not None:
self.accelerator.clip_grad_norm_(
self.model.parameters(),
self.args.max_grad_norm,
)
self.optimizer.step()
self.optimizer.zero_grad()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
self.state.global_step += 1
# update stats etc
self.tr_loss += tr_loss_step
self._maybe_log_save_evaluate()
def _maybe_log_save_evaluate(self):
# check if eval is required
if self.args.eval_steps is not None:
if self.state.global_step % self.args.eval_steps == 0 and self.state.global_step != 0:
self.evaluate(self.eval_dataset)
# check if logging is required
if self.args.logging_steps is not None:
if self.state.global_step % self.args.logging_steps == 0 and self.state.global_step != 0:
logs: Dict[str, float] = {}
tr_loss_scalar = self._nested_gather(self.tr_loss).mean().item()
# reset tr_loss to zero
self.tr_loss -= self.tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._globalstep_last_logged = self.state.global_step
self.log(logs)
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/sft_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import inspect
import warnings
from functools import wraps
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from datasets import Dataset
from datasets.arrow_writer import SchemaInferenceError
from datasets.builder import DatasetGenerationError
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollator,
DataCollatorForLanguageModeling,
PreTrainedModel,
PreTrainedTokenizerBase,
Trainer,
TrainingArguments,
)
from transformers.modeling_utils import unwrap_model
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalPrediction
from ..extras.dataset_formatting import get_formatting_func_from_dataset
from ..import_utils import is_peft_available
from .utils import (
ConstantLengthDataset,
DataCollatorForCompletionOnlyLM,
neftune_post_forward_hook,
peft_module_casting_to_bf16,
trl_sanitze_kwargs_for_tagging,
)
if is_peft_available():
from peft import PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
class SFTTrainer(Trainer):
r"""
Class definition of the Supervised Finetuning Trainer (SFT Trainer).
This class is a wrapper around the `transformers.Trainer` class and inherits all of its attributes and methods.
The trainer takes care of properly initializing the PeftModel in case a user passes a `PeftConfig` object.
Args:
model (Union[`transformers.PreTrainedModel`, `nn.Module`, `str`]):
The model to train, can be a `PreTrainedModel`, a `torch.nn.Module` or a string with the model name to
load from cache or download. The model can also be converted to a `PeftModel` if a `PeftConfig` object is
passed to the `peft_config` argument.
args (Optional[`transformers.TrainingArguments`]):
The arguments to tweak for training. Please refer to the official documentation of `transformers.TrainingArguments`
for more information.
data_collator (Optional[`transformers.DataCollator`]):
The data collator to use for training.
train_dataset (Optional[`datasets.Dataset`]):
The dataset to use for training. We recommend users to use `trl.trainer.ConstantLengthDataset` to create their dataset.
eval_dataset (Optional[Union[`datasets.Dataset`, Dict[`str`, `datasets.Dataset`]]]):
The dataset to use for evaluation. We recommend users to use `trl.trainer.ConstantLengthDataset` to create their dataset.
tokenizer (Optional[`transformers.PreTrainedTokenizer`]):
The tokenizer to use for training. If not specified, the tokenizer associated to the model will be used.
model_init (`Callable[[], transformers.PreTrainedModel]`):
The model initializer to use for training. If None is specified, the default model initializer will be used.
compute_metrics (`Callable[[transformers.EvalPrediction], Dict]`, *optional* defaults to None):
The function used to compute metrics during evaluation. It should return a dictionary mapping metric names to metric values.
If not specified, only the loss will be computed during evaluation.
callbacks (`List[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
peft_config (`Optional[PeftConfig]`):
The PeftConfig object to use to initialize the PeftModel.
dataset_text_field (`Optional[str]`):
The name of the text field of the dataset, in case this is passed by a user, the trainer will automatically create a
`ConstantLengthDataset` based on the `dataset_text_field` argument.
formatting_func (`Optional[Callable]`):
The formatting function to be used for creating the `ConstantLengthDataset`.
max_seq_length (`Optional[int]`):
The maximum sequence length to use for the `ConstantLengthDataset` and for automatically creating the Dataset. Defaults to `512`.
infinite (`Optional[bool]`):
Whether to use an infinite dataset or not. Defaults to `False`.
num_of_sequences (`Optional[int]`):
The number of sequences to use for the `ConstantLengthDataset`. Defaults to `1024`.
chars_per_token (`Optional[float]`):
The number of characters per token to use for the `ConstantLengthDataset`. Defaults to `3.6`. You can check how this is computed in the
stack-llama example: https://github.com/huggingface/trl/blob/08f550674c553c36c51d1027613c29f14f3676a5/examples/stack_llama/scripts/supervised_finetuning.py#L53.
packing (`Optional[bool]`):
Used only in case `dataset_text_field` is passed. This argument is used by the `ConstantLengthDataset` to pack the sequences
of the dataset.
dataset_num_proc (`Optional[int]`):
The number of workers to use to tokenize the data. Only used when `packing=False`. Defaults to None.
dataset_batch_size (`int`):
The number of examples to tokenize per batch. If batch_size <= 0 or batch_size == None,
tokenize the full dataset as a single batch. Defaults to 1000.
neftune_noise_alpha (`Optional[float]`):
If not `None`, this will activate NEFTune noise embeddings. This has been proven to drastically improve model performances for instruction
fine-tuning. Check out the original paper here: https://arxiv.org/abs/2310.05914 and the original code here: https://github.com/neelsjain/NEFTune
model_init_kwargs: (`Optional[Dict]`, *optional*):
Dict of Optional kwargs to pass when instantiating the model from a string
dataset_kwargs: (`Optional[Dict]`, *optional*):
Dict of Optional kwargs to pass when creating packed or non-packed datasets
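Example (a minimal sketch, not taken from the library documentation; the dataset, model name and
hyper-parameters below are placeholders):
```python
from datasets import Dataset
from transformers import TrainingArguments
from trl import SFTTrainer
train_dataset = Dataset.from_dict({"text": ["Hello world!", "TRL is a library for training transformers with RL."]})
trainer = SFTTrainer(
    model="gpt2",  # a model id is resolved to an `AutoModelForCausalLM`
    args=TrainingArguments(output_dir="sft_model", max_steps=5, per_device_train_batch_size=1),
    train_dataset=train_dataset,
    dataset_text_field="text",
    max_seq_length=128,
)
trainer.train()
```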
"""
_tag_names = ["trl", "sft"]
def __init__(
self,
model: Union[PreTrainedModel, nn.Module, str] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
peft_config: Optional["PeftConfig"] = None,
dataset_text_field: Optional[str] = None,
packing: Optional[bool] = False,
formatting_func: Optional[Callable] = None,
max_seq_length: Optional[int] = None,
infinite: Optional[bool] = None,
num_of_sequences: Optional[int] = 1024,
chars_per_token: Optional[float] = 3.6,
dataset_num_proc: Optional[int] = None,
dataset_batch_size: int = 1000,
neftune_noise_alpha: Optional[float] = None,
model_init_kwargs: Optional[Dict] = None,
dataset_kwargs: Optional[Dict] = None,
):
if model_init_kwargs is None:
model_init_kwargs = {}
elif not isinstance(model, str):
raise ValueError("You passed model_kwargs to the SFTTrainer. But your model is already instantiated.")
if infinite is not None:
warnings.warn(
"The `infinite` argument is deprecated and will be removed in a future version of TRL. Use `TrainingArguments.max_steps` or `TrainingArguments.num_train_epochs` instead to control training length."
)
if isinstance(model, str):
warnings.warn(
"You passed a model_id to the SFTTrainer. This will automatically create an "
"`AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you."
)
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
if packing and data_collator is not None and isinstance(data_collator, DataCollatorForCompletionOnlyLM):
raise ValueError(
"You passed a `DataCollatorForCompletionOnlyLM` to the SFTTrainer. This is not compatible with the `packing` argument."
)
if is_peft_available() and peft_config is not None:
if not isinstance(peft_config, PeftConfig):
raise ValueError(
"If you want to use the PeftModel, you need to pass a PeftConfig object to the SFTTrainer."
f" and you passed a {type(peft_config)}."
)
if not isinstance(model, PeftModel):
_support_gc_kwargs = hasattr(
args, "gradient_checkpointing_kwargs"
) and "gradient_checkpointing_kwargs" in list(
inspect.signature(prepare_model_for_kbit_training).parameters
)
gradient_checkpointing_kwargs = getattr(args, "gradient_checkpointing_kwargs", None) or {}
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
prepare_model_kwargs = {
"use_gradient_checkpointing": getattr(args, "gradient_checkpointing", False)
}
if _support_gc_kwargs:
prepare_model_kwargs["gradient_checkpointing_kwargs"] = gradient_checkpointing_kwargs
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
if args is not None:
args = dataclasses.replace(args, gradient_checkpointing=False)
elif getattr(args, "gradient_checkpointing", False) and (
"use_reentrant" not in gradient_checkpointing_kwargs
or gradient_checkpointing_kwargs["use_reentrant"]
):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
model = get_peft_model(model, peft_config)
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
peft_module_casting_to_bf16(model)
if tokenizer is None:
tokenizer = AutoTokenizer.from_pretrained(model.config._name_or_path)
if getattr(tokenizer, "pad_token", None) is None:
tokenizer.pad_token = tokenizer.eos_token
if max_seq_length is None:
# to overcome some issues with broken tokenizers
max_seq_length = min(tokenizer.model_max_length, 1024)
warnings.warn(
f"You didn't pass a `max_seq_length` argument to the SFTTrainer, this will default to {max_seq_length}"
)
self.dataset_num_proc = dataset_num_proc
self.dataset_batch_size = dataset_batch_size
self._trainer_supports_neftune = hasattr(args, "neftune_noise_alpha")
if neftune_noise_alpha is not None and self._trainer_supports_neftune:
args.neftune_noise_alpha = neftune_noise_alpha
warnings.warn(
"You passed a `neftune_noise_alpha` argument to the SFTTrainer, the value you passed will override the one in the `TrainingArguments`."
)
# self.neftune_noise_alpha is done at Trainer level
elif not self._trainer_supports_neftune:
self.neftune_noise_alpha = neftune_noise_alpha
if formatting_func is None and dataset_text_field is None:
# check if dataset has ChatML format or instruction format and is supported
# if not, it stays None
formatting_func = get_formatting_func_from_dataset(train_dataset, tokenizer)
if not packing:
if dataset_text_field is None and formatting_func is None:
raise ValueError(
"You passed `packing=False` to the SFTTrainer, but you didn't pass a `dataset_text_field` or `formatting_func` argument."
)
if data_collator is None:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
if dataset_kwargs is None:
dataset_kwargs = {}
if train_dataset is not None:
train_dataset = self._prepare_dataset(
train_dataset,
tokenizer,
packing,
dataset_text_field,
max_seq_length,
formatting_func,
num_of_sequences,
chars_per_token,
remove_unused_columns=args.remove_unused_columns if args is not None else True,
**dataset_kwargs,
)
if eval_dataset is not None:
_multiple = isinstance(eval_dataset, dict)
_eval_datasets = eval_dataset if _multiple else {"singleton": eval_dataset}
for _eval_dataset_name, _eval_dataset in _eval_datasets.items():
_eval_datasets[_eval_dataset_name] = self._prepare_dataset(
_eval_dataset,
tokenizer,
packing,
dataset_text_field,
max_seq_length,
formatting_func,
num_of_sequences,
chars_per_token,
remove_unused_columns=args.remove_unused_columns if args is not None else True,
**dataset_kwargs,
)
if not _multiple:
eval_dataset = _eval_datasets["singleton"]
if tokenizer.padding_side is not None and tokenizer.padding_side != "right":
warnings.warn(
"You passed a tokenizer with `padding_side` not equal to `right` to the SFTTrainer. This might lead to some unexpected behaviour due to "
"overflow issues when training a model in half-precision. You might consider adding `tokenizer.padding_side = 'right'` to your code."
)
super().__init__(
model=model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
model_init=model_init,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
if self.args.max_steps > 0 and packing:
warnings.warn(
"You passed `packing=True` to the SFTTrainer, and you are training your model with `max_steps` strategy. The dataset will be iterated until the `max_steps` are reached."
)
self.train_dataset.infinite = True
elif self.args.max_steps == -1 and packing:
self.train_dataset.infinite = False
@wraps(Trainer.train)
def train(self, *args, **kwargs):
# Activate neftune right before training.
if self.neftune_noise_alpha is not None and not self._trainer_supports_neftune:
self.model = self._trl_activate_neftune(self.model)
output = super().train(*args, **kwargs)
# After training we make sure to retrieve back the original forward pass method
# for the embedding layer by removing the forward post hook.
if self.neftune_noise_alpha is not None and not self._trainer_supports_neftune:
unwrapped_model = unwrap_model(self.model)
if is_peft_available() and isinstance(unwrapped_model, PeftModel):
embeddings = unwrapped_model.base_model.model.get_input_embeddings()
else:
embeddings = unwrapped_model.get_input_embeddings()
self.neftune_hook_handle.remove()
del embeddings.neftune_noise_alpha
return output
@wraps(Trainer.push_to_hub)
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
"""
Overwrite the `push_to_hub` method in order to force-add the tag "sft" when pushing the
model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
"""
kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs)
return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs)
def _prepare_dataset(
self,
dataset,
tokenizer,
packing,
dataset_text_field,
max_seq_length,
formatting_func,
num_of_sequences,
chars_per_token,
remove_unused_columns=True,
append_concat_token=True,
add_special_tokens=True,
):
if dataset is None:
raise ValueError("The dataset should not be None")
# check if torch dataset / dataloader and do nothing
if isinstance(dataset, (torch.utils.data.IterableDataset, torch.utils.data.Dataset, ConstantLengthDataset)):
return dataset
if not packing:
return self._prepare_non_packed_dataloader(
tokenizer,
dataset,
dataset_text_field,
max_seq_length,
formatting_func,
add_special_tokens,
remove_unused_columns,
)
else:
return self._prepare_packed_dataloader(
tokenizer,
dataset,
dataset_text_field,
max_seq_length,
num_of_sequences,
chars_per_token,
formatting_func,
append_concat_token,
add_special_tokens,
)
def _prepare_non_packed_dataloader(
self,
tokenizer,
dataset,
dataset_text_field,
max_seq_length,
formatting_func=None,
add_special_tokens=True,
remove_unused_columns=True,
):
use_formatting_func = formatting_func is not None and dataset_text_field is None
self._dataset_sanity_checked = False
# Inspired from: https://huggingface.co/learn/nlp-course/chapter7/6?fw=pt
def tokenize(element):
outputs = tokenizer(
element[dataset_text_field] if not use_formatting_func else formatting_func(element),
add_special_tokens=add_special_tokens,
truncation=True,
padding=False,
max_length=max_seq_length,
return_overflowing_tokens=False,
return_length=False,
)
if use_formatting_func and not self._dataset_sanity_checked:
if not isinstance(formatting_func(element), list):
raise ValueError(
"The `formatting_func` should return a list of processed strings since it can lead to silent bugs."
)
else:
self._dataset_sanity_checked = True
return {
"input_ids": outputs["input_ids"],
"labels": outputs["input_ids"],
"attention_mask": outputs["attention_mask"],
}
signature_columns = ["input_ids", "labels", "attention_mask"]
extra_columns = list(set(dataset.column_names) - set(signature_columns))
if not remove_unused_columns and len(extra_columns) > 0:
warnings.warn(
"You passed `remove_unused_columns=False` on a non-packed dataset. This might create some issues with the default collator and lead to errors. If you want to "
f"inspect the other columns of your dataset (in this case {extra_columns}), you can subclass `DataCollatorForLanguageModeling` in case you used the default collator and create your own data collator in order to inspect the unused dataset columns."
)
tokenized_dataset = dataset.map(
tokenize,
batched=True,
remove_columns=dataset.column_names if remove_unused_columns else None,
num_proc=self.dataset_num_proc,
batch_size=self.dataset_batch_size,
)
return tokenized_dataset
def _prepare_packed_dataloader(
self,
tokenizer,
dataset,
dataset_text_field,
max_seq_length,
num_of_sequences,
chars_per_token,
formatting_func=None,
append_concat_token=True,
add_special_tokens=True,
):
if dataset_text_field is not None or formatting_func is not None:
if tokenizer is None:
raise ValueError("You need to pass a tokenizer when using `dataset_text_field` with `SFTTrainer`.")
constant_length_iterator = ConstantLengthDataset(
tokenizer,
dataset,
dataset_text_field=dataset_text_field,
formatting_func=formatting_func,
seq_length=max_seq_length,
infinite=False,
num_of_sequences=num_of_sequences,
chars_per_token=chars_per_token,
eos_token_id=tokenizer.eos_token_id,
append_concat_token=append_concat_token,
add_special_tokens=add_special_tokens,
)
def data_generator(constant_length_iterator):
for i in constant_length_iterator:
yield i
try:
packed_dataset = Dataset.from_generator(
data_generator, gen_kwargs={"constant_length_iterator": constant_length_iterator}
)
except (DatasetGenerationError, SchemaInferenceError):
raise ValueError(
"Error occurred while packing the dataset. Make sure that your dataset has enough samples to at least yield one packed sequence."
)
return packed_dataset
else:
raise ValueError(
"You need to pass a `dataset_text_field` or `formatting_func` argument to the SFTTrainer if you want to use the `ConstantLengthDataset`."
)
def _trl_activate_neftune(self, model):
r"""
Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914
Since in transformers Trainer we do have an `_activate_neftune` method, we need to rename this method to avoid conflicts.
"""
unwrapped_model = unwrap_model(model)
if is_peft_available() and isinstance(unwrapped_model, PeftModel):
embeddings = unwrapped_model.base_model.model.get_input_embeddings()
else:
embeddings = unwrapped_model.get_input_embeddings()
embeddings.neftune_noise_alpha = self.neftune_noise_alpha
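# the registered hook adds uniform noise with magnitude neftune_noise_alpha / sqrt(seq_len * hidden_dim)
# to the embedding outputs during training only (it is a no-op in eval mode)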
hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook)
self.neftune_hook_handle = hook_handle
return model
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/ppo_trainer.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import os
import time
import typing
import warnings
from contextlib import nullcontext
from typing import Callable, List, Optional, Union
import datasets
import numpy as np
import torch
import torch.nn.functional as F
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, gather_object, is_deepspeed_available
from datasets import Dataset
from huggingface_hub import whoami
from packaging import version
from torch.optim import Adam
from transformers import (
DataCollatorForLanguageModeling,
PreTrainedTokenizer,
PreTrainedTokenizerBase,
PreTrainedTokenizerFast,
)
from ..core import (
WANDB_PADDING,
PPODecorators,
clip_by_value,
convert_to_scalar,
entropy_from_logits,
flatten_dict,
logprobs_from_logits,
masked_mean,
masked_var,
masked_whiten,
set_seed,
stack_dicts,
stats_to_np,
)
from ..import_utils import is_npu_available, is_torch_greater_2_0, is_xpu_available
from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper, create_reference_model
from . import AdaptiveKLController, BaseTrainer, FixedKLController, PPOConfig, RunningMoments
if is_deepspeed_available():
import deepspeed
MODEL_CARD_TEMPLATE = """---
license: apache-2.0
tags:
- trl
- ppo
- transformers
- reinforcement-learning
---
# {model_name}
This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to
guide the model outputs according to a value function or human feedback. The model can be used for text generation.
## Usage
To use this model for inference, first install the TRL library:
```bash
python -m pip install trl
```
You can then generate text as follows:
```python
from transformers import pipeline
generator = pipeline("text-generation", model="{model_id}")
outputs = generator("Hello, my llama is cute")
```
If you want to use the model for training or to obtain the outputs from the value head, load the model as follows:
```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead
tokenizer = AutoTokenizer.from_pretrained("{model_id}")
model = AutoModelForCausalLMWithValueHead.from_pretrained("{model_id}")
inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])
```
"""
class PPOTrainer(BaseTrainer):
"""
The PPOTrainer uses Proximal Policy Optimization to optimise language models.
Note, this trainer is heavily inspired by the original OpenAI learning to summarize work here:
https://github.com/openai/summarize-from-feedback
Attributes:
**config** (`PPOConfig`) -- Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more
details.
**model** (`PreTrainedModelWrapper`) -- Model to be optimized, Hugging Face transformer model with a value head.
Check the documentation of `PreTrainedModelWrapper` for more details.
**ref_model** (`PreTrainedModelWrapper`, *optional*) -- Reference model to be used for KL penalty, Hugging Face
transformer model with a causal language modelling head. Check the documentation of `PreTrainedModelWrapper`
for more details. If no reference model is provided, the trainer will create a reference model with the same
architecture as the model to be optimized with shared layers.
**tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the
data. Check the documentation of `transformers.PreTrainedTokenizer` and
`transformers.PreTrainedTokenizerFast` for more details.
**dataset** (Union[`torch.utils.data.Dataset`, `datasets.Dataset`], *optional*) -- PyTorch dataset or Hugging
Face dataset. This is used to create a PyTorch dataloader. If no dataset is provided, the dataloader must be
created outside the trainer: users need to design their own dataloader and make sure the batch
size that is used is the same as the one specified in the configuration object.
**optimizer** (`torch.optim.Optimizer`, *optional*) -- Optimizer to be used for training. If no optimizer is
provided, the trainer will create an Adam optimizer with the learning rate specified in the configuration
object.
**data_collator** (DataCollatorForLanguageModeling, *optional*) -- Data collator to be used for training and
passed along the dataloader
**num_shared_layers** (int, *optional*) -- Number of layers to be shared between the model and the reference
model, if no reference model is passed. If no number is provided, all the layers will be shared.
**lr_scheduler** (`torch.optim.lr_scheduler`, *optional*) -- Learning rate scheduler to be used for training.
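Example (a minimal sketch, not taken from the library documentation; the model name, query, response
and reward below are placeholders -- in practice the response would come from `generate` and the
reward from a reward model or human feedback):
```python
import torch
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
config = PPOConfig(model_name="gpt2", batch_size=1, mini_batch_size=1)
model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)
tokenizer = AutoTokenizer.from_pretrained(config.model_name)
tokenizer.pad_token = tokenizer.eos_token
ppo_trainer = PPOTrainer(config=config, model=model, tokenizer=tokenizer)
query = tokenizer("Hello, my llama is", return_tensors="pt").input_ids[0]
response = tokenizer(" cute and very fluffy indeed", return_tensors="pt").input_ids[0]
reward = torch.tensor(1.0)  # e.g. the score of a reward model for (query, response)
stats = ppo_trainer.step([query], [response], [reward])
```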
"""
_tag_names = ["trl", "ppo"]
def __init__(
self,
config: PPOConfig = None,
model: PreTrainedModelWrapper = None,
ref_model: Optional[PreTrainedModelWrapper] = None,
tokenizer: PreTrainedTokenizerBase = None,
dataset: Optional[Union[torch.utils.data.Dataset, Dataset]] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
data_collator: Optional[typing.Callable] = None,
num_shared_layers: Optional[int] = None,
lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
):
"""
Initialize PPOTrainer.
Args:
config (`PPOConfig`):
Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more details.
model (`PreTrainedModelWrapper`):
Hugging Face transformer model with a value head.
ref_model (`PreTrainedModelWrapper`):
                Hugging Face transformer model with a causal language modelling head. Used for the KL penalty.
tokenizer (`transformers.PreTrainedTokenizerBase`):
Hugging Face tokenizer
dataset (Optional[Union[`torch.utils.data.Dataset`, `datasets.Dataset`]]):
PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
will be preprocessed by removing the columns that are not used by the model. If none is passed,
a warning will be raised in a multi-GPU setting.
optimizer (Optional[`torch.optim.Optimizer`]):
                Optimizer used for training. If `None`, `Adam` is used by default.
data_collator (Optional[function]):
Data collator function.
num_shared_layers (Optional[int]):
Number of shared layers between the model and the reference model. If `None`, all layers are shared.
                Used only if `ref_model` is `None`.
lr_scheduler (Optional[`torch.optim.lr_scheduler`]):
Learning rate scheduler used for training.
"""
super().__init__(config)
# initial seed for reproducible experiments
set_seed(config.seed)
# Step 0: check positional arguments validity
if not isinstance(config, PPOConfig):
raise ValueError(f"config must be a PPOConfig, got {type(config)}")
if not isinstance(tokenizer, (PreTrainedTokenizerBase)):
raise ValueError(
f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}"
)
if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
raise ValueError(
f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
)
# Step 1: Initialize Accelerator
self.accelerator = Accelerator(
log_with=config.log_with,
gradient_accumulation_steps=config.gradient_accumulation_steps,
project_config=ProjectConfiguration(**config.project_kwargs),
**config.accelerator_kwargs,
)
# Step 1.1 Runtime variables filled by the accelerator
config.world_size = self.accelerator.num_processes
config.global_backward_batch_size = config.backward_batch_size * config.world_size
config.global_batch_size = config.batch_size * config.world_size
self.model = model
self.model_params = filter(lambda p: p.requires_grad, self.model.parameters())
self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder")
self.is_peft_model = getattr(self.model, "is_peft_model", False)
config.is_encoder_decoder = self.is_encoder_decoder
config.is_peft_model = self.is_peft_model
is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard"
self.accelerator.init_trackers(
config.tracker_project_name,
config=dict(trl_ppo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(),
init_kwargs=config.tracker_kwargs,
)
self.is_using_text_environment = getattr(config, "use_text_environment", False)
if isinstance(ref_model, SUPPORTED_ARCHITECTURES):
self.ref_model = ref_model
if num_shared_layers is not None:
warnings.warn(
"num_shared_layers is ignored when ref_model is provided. Two different models are used for the "
"model and the reference model and no layers are shared.",
UserWarning,
)
elif ref_model is None and not self.is_peft_model:
self.ref_model = create_reference_model(self.model, num_shared_layers=num_shared_layers)
elif self.is_peft_model:
self.ref_model = None
else:
raise ValueError(
f"ref_model must be a PreTrainedModelWrapper or `None`, got {type(ref_model)} - supported "
f"architectures are: {SUPPORTED_ARCHITECTURES} "
)
self.optional_peft_ctx = (
self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter
if self.is_peft_model
else nullcontext
)
if not (isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast)):
raise ValueError(
"tokenizer must be a transformers.PreTrainedTokenizer or transformers.PreTrainedTokenizerFast"
)
self.tokenizer = tokenizer
if dataset is not None and not (isinstance(dataset, torch.utils.data.Dataset) or isinstance(dataset, Dataset)):
raise ValueError("dataset must be a torch.utils.data.Dataset or datasets.Dataset")
elif dataset is None:
warnings.warn(
"No dataset is provided. Make sure to set config.batch_size to the correct value before training.",
UserWarning,
)
self.dataset = dataset
self._signature_columns = None
if self.dataset is not None:
self.dataloader = self.prepare_dataloader(self.dataset, data_collator)
elif self.dataset is None and self.accelerator.num_processes > 1:
warnings.warn(
"No dataset is provided. In a multi-GPU setting, this will lead to an error. You should"
" prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`"
" and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please "
" refer to the documentation for more details.",
UserWarning,
)
self.dataloader = None
else:
self.dataloader = None
# Step 3: Initialize optimizer and data collator
self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
if optimizer is None:
self.optimizer = Adam(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.config.learning_rate,
)
else:
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
if self.lr_scheduler is not None:
lr_scheduler_class = (
torch.optim.lr_scheduler._LRScheduler
if not is_torch_greater_2_0()
else torch.optim.lr_scheduler.LRScheduler
)
if not isinstance(self.lr_scheduler, lr_scheduler_class):
raise ValueError(
"lr_scheduler must be a torch.optim.lr_scheduler._LRScheduler or torch.optim.lr_scheduler.LRScheduler (for torch >= 2.0)"
)
if self.config.adap_kl_ctrl:
self.kl_ctl = AdaptiveKLController(self.config.init_kl_coef, self.config.target, self.config.horizon)
else:
self.kl_ctl = FixedKLController(self.config.init_kl_coef)
# Safety checkers for DS integration
is_deepspeed_used = self.accelerator.distributed_type == "DEEPSPEED" and hasattr(
self.accelerator.state, "deepspeed_plugin"
)
(
self.model,
self.optimizer,
self.data_collator,
self.dataloader,
self.lr_scheduler,
) = self.accelerator.prepare(
self.model,
self.optimizer,
self.data_collator,
self.dataloader,
self.lr_scheduler,
)
if is_deepspeed_used:
# Quantized models are already set on the correct device
if not self.is_peft_model and not (
getattr(self.ref_model.pretrained_model, "is_loaded_in_8bit", False)
or getattr(self.ref_model.pretrained_model, "is_loaded_in_4bit", False)
):
self.ref_model = self._prepare_deepspeed(self.ref_model)
else:
self.ref_model = self.accelerator.prepare(self.ref_model)
# In a distributed setup, only logging needs to be performed on the main process
# check: https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html
# or: https://discuss.pytorch.org/t/use-distributed-data-parallel-correctly/82500/11
self.is_distributed = self.accelerator.num_processes > 1
# init the current step
self.current_step = 0
# init variables for pushing model to hub
if config.push_to_hub_if_best_kwargs:
if "repo_id" not in config.push_to_hub_if_best_kwargs:
raise ValueError("You have to specify repo_id in order to push the model to the hub!")
self.push_to_hub_kwargs = config.push_to_hub_if_best_kwargs
self.compare_step = 0
self.highest_reward = torch.tensor(-float("inf"))
# post process for PP
if not getattr(self.model, "is_sequential_parallel", False):
self.current_device = self.accelerator.device
else:
if is_xpu_available():
self.current_device = torch.device("xpu:0")
elif is_npu_available():
self.current_device = torch.device("npu:0")
else:
self.current_device = torch.device("cuda:0")
PPODecorators.optimize_device_cache = self.config.optimize_device_cache
self.running = RunningMoments(self.accelerator)
def _filter_kwargs(self, kwargs, target_func):
"""
        Filter the keyword arguments that are supported by the target function.
Args:
kwargs (dict):
Keyword arguments
target_func (function):
Target function
"""
return {k: v for k, v in kwargs.items() if k in inspect.signature(target_func).parameters.keys()}
def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], data_collator=None):
"""
Prepare the dataloader for training.
Args:
dataset (Union[`torch.utils.data.Dataset`, `datasets.Dataset`]):
PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
will be preprocessed by removing the columns that are not used by the model.
data_collator (Optional[function]):
Data collator function.
Returns:
`torch.utils.data.DataLoader`: PyTorch dataloader
"""
if isinstance(dataset, Dataset):
dataset = self._remove_unused_columns(dataset)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.config.batch_size,
collate_fn=data_collator,
shuffle=True,
drop_last=True,
)
return dataloader
# Adapted from transformers.Trainer._set_signature_columns_if_needed
def _set_signature_columns_if_needed(self):
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# label => sentiment | we need query and response for logging purpose
self._signature_columns += ["label", "query", "response"]
# Adapted from transformers.Trainer._remove_unused_columns
def _remove_unused_columns(self, dataset: "Dataset"):
if not self.config.remove_unused_columns:
return dataset
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
columns = [k for k in signature_columns if k in dataset.column_names]
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"],
columns=columns,
format_kwargs=dataset.format["format_kwargs"],
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def generate(
self,
query_tensor: Union[torch.Tensor, List[torch.Tensor]],
length_sampler: Callable = None,
batch_size: int = 4,
return_prompt: bool = True,
generate_ref_response: bool = False,
**generation_kwargs,
):
"""
        Generate a response with the model given the query tensor.
        Calls the `generate` method of the model.
Args:
query_tensor (`torch.LongTensor`):
A tensor of shape (`seq_len`) containing query tokens or a list of tensors of shape (`seq_len`).
generation_kwargs (dict[str, Any]):
Keyword arguments for generation.
length_sampler (`Callable`, *optional*):
Callable that returns the number of newly generated tokens.
            batch_size (`int`, *optional*):
Batch size used for generation, defaults to `4`.
return_prompt (`bool`, *optional*):
If set to `False` the prompt is not returned but only the newly generated tokens, defaults to `True`.
generate_ref_response (`bool`, *optional*):
If set to `True` the reference response is also generated, defaults to `False`.
Returns:
            `torch.LongTensor`: A tensor of shape (`batch_size`, `gen_len`) containing response tokens, or a list of
            such tensors when a list of query tensors is passed.
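        Example:
            A minimal sketch assuming `ppo_trainer` and `tokenizer` were built as in the class-level example
            (the query text and generation kwargs are illustrative placeholders):
            ```python
            query = tokenizer("The weather today is", return_tensors="pt").input_ids.squeeze(0)
            response = ppo_trainer.generate(
                query, max_new_tokens=16, do_sample=True, pad_token_id=tokenizer.eos_token_id
            )
            print(tokenizer.decode(response.squeeze(0)))
            ```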
"""
if generate_ref_response:
ref_model = self.model if self.is_peft_model else self.ref_model
if isinstance(query_tensor, List):
response = self._generate_batched(
self.model,
query_tensor,
length_sampler=length_sampler,
batch_size=batch_size,
return_prompt=return_prompt,
**generation_kwargs,
)
if generate_ref_response:
with self.optional_peft_ctx():
ref_response = self._generate_batched(
ref_model,
query_tensor,
length_sampler=length_sampler,
batch_size=batch_size,
return_prompt=return_prompt,
**generation_kwargs,
)
else:
if len(query_tensor.shape) == 2:
raise ValueError(
"query_tensor must be a tensor of shape (`seq_len`) or a list of tensors of shape (`seq_len`)"
)
if length_sampler is not None:
generation_kwargs["max_new_tokens"] = length_sampler()
response = self.accelerator.unwrap_model(self.model).generate(
input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs
)
if generate_ref_response:
with self.optional_peft_ctx():
ref_response = ref_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs)
if not return_prompt and not self.is_encoder_decoder:
response = response[:, query_tensor.shape[0] :]
if generate_ref_response:
ref_response = ref_response[:, query_tensor.shape[0] :]
if generate_ref_response:
return response, ref_response
return response
def _generate_batched(
self,
model: PreTrainedModelWrapper,
query_tensors: List[torch.Tensor],
length_sampler: Callable = None,
batch_size: int = 4,
return_prompt: bool = True,
pad_to_multiple_of: int = None,
remove_padding: bool = True,
**generation_kwargs,
):
outputs = []
padding_side_default = self.tokenizer.padding_side
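        # Decoder-only models are padded on the left so that generation continues from the last
        # real (non-padding) token of each query.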
if not self.is_encoder_decoder:
self.tokenizer.padding_side = "left"
# in case we have fewer examples than bs
batch_size = min(len(query_tensors), batch_size)
for i in range(0, len(query_tensors), batch_size):
if length_sampler is not None:
generation_kwargs["max_new_tokens"] = length_sampler()
# prevent overflow if query tensors are not even multiple of bs
end_index = min(len(query_tensors), i + batch_size)
batch = query_tensors[i:end_index]
batch_mask = [torch.ones_like(element) for element in batch]
inputs = {"input_ids": batch, "attention_mask": batch_mask}
padded_inputs = self.tokenizer.pad(
inputs,
padding=True,
max_length=None,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
).to(self.current_device)
generations = self.accelerator.unwrap_model(model).generate(**padded_inputs, **generation_kwargs)
for generation, mask in zip(generations, padded_inputs["attention_mask"]):
if not self.is_encoder_decoder:
output = generation[(1 - mask).sum() :] # remove padding
else:
output = generation
if not return_prompt and not self.is_encoder_decoder:
output = output[(mask).sum() :] # remove prompt
if remove_padding and self.tokenizer.eos_token_id in output:
pad_mask = output == self.tokenizer.eos_token_id
pad_start = torch.nonzero(pad_mask, as_tuple=False)[0, 0].item()
output = output[: pad_start + 1] # keep the eos token at the end
outputs.append(output)
self.tokenizer.padding_side = padding_side_default
return outputs
def _step_safety_checker(
self,
batch_size: int,
queries: List[torch.LongTensor],
responses: List[torch.LongTensor],
scores: List[torch.FloatTensor],
masks: Optional[List[torch.LongTensor]] = None,
):
"""
Check if the input data is valid for training.
Args:
batch_size (int):
Batch size from the config file.
queries (List[`torch.LongTensor`]):
List of tensors containing the encoded queries of shape (`query_length`)
responses (List[`torch.LongTensor`]):
List of tensors containing the encoded responses of shape (`response_length`)
scores (List[`torch.FloatTensor`]):
List of tensors containing the scores.
masks (List[`torch.LongTensor`], *optional*):
list of optional tensors containing the masks of shape (`query_length` + `response_length`)
Returns:
`tuple`: The input processed data.
"""
for name, tensor_list in zip(["queries", "responses", "scores"], [queries, responses, scores]):
if not isinstance(tensor_list, list):
raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
if not isinstance(tensor_list[0], torch.Tensor):
raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
if batch_size is not None and len(tensor_list) != batch_size:
raise ValueError(
f"Batch size ({batch_size}) does not match number of examples - but got {len(tensor_list)} for: {name}"
)
# add queries, scores and responses on the correct device
queries = [tensor.to(self.current_device) for tensor in queries]
responses = [tensor.to(self.current_device) for tensor in responses]
scores = [tensor.to(self.current_device) for tensor in scores]
masks = [tensor.to(self.current_device) for tensor in masks] if masks is not None else None
# squeeze scores if needed
for i, score in enumerate(scores):
if score.dim() > 1:
raise ValueError(f"Scores must be 1-dimensional - got {score.dim()} for {score}")
elif score.dim() == 1:
scores[i] = score.squeeze()
return queries, responses, scores, masks
@PPODecorators.empty_device_cache()
def step(
self,
queries: List[torch.LongTensor],
responses: List[torch.LongTensor],
scores: List[torch.FloatTensor],
response_masks: Optional[List[torch.LongTensor]] = None,
):
"""
Run a PPO optimisation step given a list of queries, model responses, and rewards.
Args:
queries (List[`torch.LongTensor`]):
List of tensors containing the encoded queries of shape (`query_length`)
responses (List[`torch.LongTensor`]):
List of tensors containing the encoded responses of shape (`response_length`)
scores (List[`torch.FloatTensor`]):
List of tensors containing the scores.
response_masks (List[`torch.FloatTensor`], *optional*)):
List of tensors containing masks of the response tokens.
Returns:
`dict[str, Any]`: A summary of the training statistics
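        Example:
            A minimal sketch with toy tensors, assuming a trainer configured with `batch_size=1` and a tokenizer
            whose vocabulary contains the illustrative token ids below:
            ```python
            queries = [torch.tensor([314, 588])]        # pre-tokenized query ids (illustrative)
            responses = [torch.tensor([284, 719, 25])]  # pre-tokenized response ids (illustrative)
            rewards = [torch.tensor(0.5)]               # one scalar reward per example
            stats = ppo_trainer.step(queries, responses, rewards)
            print(stats["ppo/mean_scores"])
            ```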
"""
bs = self.config.batch_size
queries, responses, scores, response_masks = self._step_safety_checker(
bs, queries, responses, scores, response_masks
)
scores = torch.tensor(scores, device=self.current_device)
if self.config.use_score_scaling:
# Score scaling
scores_mean, scores_std = self.running.update(scores)
tensor_to_kwargs = dict(dtype=scores.dtype, device=scores.device)
score_scaling_factor = self.running.std.to(**tensor_to_kwargs) + torch.finfo(scores.dtype).eps
if self.config.use_score_norm:
scores = (scores - self.running.mean.to(**tensor_to_kwargs)) / score_scaling_factor
else:
scores /= score_scaling_factor
if self.config.score_clip is not None:
# Score clipping
scores_dtype = scores.dtype
scores = torch.clip(scores.float(), -self.config.score_clip, self.config.score_clip).to(dtype=scores_dtype)
# if we want to push best model to the hub
if hasattr(self, "highest_reward"):
if self.compare_step % self.config.compare_steps == 0:
curr_mean_reward = scores.mean()
# if the best reward ever seen
if curr_mean_reward > self.highest_reward:
self.highest_reward = curr_mean_reward
# push model to hub
self.push_to_hub(**self.push_to_hub_kwargs)
self.compare_step += 1
timing = dict()
t0 = time.time()
t = time.time()
model_inputs = self.prepare_model_inputs(queries, responses)
if self.is_distributed:
pad_first = self.tokenizer.padding_side == "left"
model_inputs["input_ids"] = self.accelerator.pad_across_processes(
model_inputs["input_ids"],
dim=1,
pad_index=self.tokenizer.pad_token_id,
pad_first=pad_first,
)
model_inputs["attention_mask"] = self.accelerator.pad_across_processes(
model_inputs["attention_mask"], dim=1, pad_index=0, pad_first=pad_first
)
if self.is_encoder_decoder:
model_inputs["decoder_input_ids"] = self.accelerator.pad_across_processes(
model_inputs["decoder_input_ids"],
dim=1,
pad_index=self.tokenizer.pad_token_id,
pad_first=pad_first,
)
model_inputs["decoder_attention_mask"] = self.accelerator.pad_across_processes(
model_inputs["decoder_attention_mask"],
dim=1,
pad_index=0,
pad_first=pad_first,
)
model_inputs_names = list(model_inputs.keys())
full_kl_penalty = self.config.kl_penalty == "full"
with torch.no_grad():
all_logprobs, logits_or_none, values, masks = self.batched_forward_pass(
self.model,
queries,
responses,
model_inputs,
response_masks=response_masks,
return_logits=full_kl_penalty,
)
with self.optional_peft_ctx():
ref_logprobs, ref_logits_or_none, _, _ = self.batched_forward_pass(
self.model if self.is_peft_model else self.ref_model,
queries,
responses,
model_inputs,
return_logits=full_kl_penalty,
)
timing["time/ppo/forward_pass"] = time.time() - t
with torch.no_grad():
t = time.time()
if full_kl_penalty:
active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False)
ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False)
rewards, non_score_reward, kls = self.compute_rewards(
scores, active_full_logprobs, ref_full_logprobs, masks
)
else:
rewards, non_score_reward, kls = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks)
timing["time/ppo/compute_rewards"] = time.time() - t
t = time.time()
values, advantages, returns = self.compute_advantages(values, rewards, masks)
timing["time/ppo/compute_advantages"] = time.time() - t
# upcast to float32 to avoid dataset issues
batch_dict = {
"queries": queries,
"responses": responses,
"logprobs": all_logprobs.to(torch.float32),
"values": values.to(torch.float32),
"masks": masks,
"advantages": advantages,
"returns": returns,
}
batch_dict.update(model_inputs)
t = time.time()
all_stats = []
early_stop = False
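        # Nested optimisation loop over the collected rollout:
        #   config.batch_size          - full rollout batch gathered for this `step` call
        #   config.backward_batch_size - slice consumed per optimizer update (mini_batch_size * gradient_accumulation_steps)
        #   config.mini_batch_size     - slice per forward/backward pass inside the gradient accumulation context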
for _ in range(self.config.ppo_epochs):
if early_stop:
break
b_inds = np.random.permutation(bs)
for backward_batch_start in range(0, bs, self.config.backward_batch_size):
backward_batch_end = backward_batch_start + self.config.backward_batch_size
backward_batch_inds = b_inds[backward_batch_start:backward_batch_end]
for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size):
mini_batch_end = mini_batch_start + self.config.mini_batch_size
mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end]
mini_batch_dict = {
"logprobs": batch_dict["logprobs"][mini_batch_inds],
"values": batch_dict["values"][mini_batch_inds],
"masks": batch_dict["masks"][mini_batch_inds],
# hacks: the queries and responses are ragged.
"queries": [batch_dict["queries"][i] for i in mini_batch_inds],
"responses": [batch_dict["responses"][i] for i in mini_batch_inds],
"advantages": batch_dict["advantages"][mini_batch_inds],
"returns": batch_dict["returns"][mini_batch_inds],
}
for k in model_inputs_names:
mini_batch_dict[k] = batch_dict[k][mini_batch_inds]
with self.accelerator.accumulate(self.model):
model_inputs = {k: mini_batch_dict[k] for k in model_inputs_names}
logprobs, logits, vpreds, _ = self.batched_forward_pass(
self.model,
mini_batch_dict["queries"],
mini_batch_dict["responses"],
model_inputs,
return_logits=True,
)
train_stats = self.train_minibatch(
mini_batch_dict["logprobs"],
mini_batch_dict["values"],
logprobs,
logits,
vpreds,
mini_batch_dict["masks"],
mini_batch_dict["advantages"],
mini_batch_dict["returns"],
)
all_stats.append(train_stats)
# typically, early stopping is done at the epoch level
if self.config.early_stopping:
policykl = train_stats["policy/policykl"]
early_stop = self._early_stop(policykl)
if early_stop:
break
timing["time/ppo/optimize_step"] = time.time() - t
t = time.time()
train_stats = stack_dicts(all_stats)
# reshape advantages/ratios such that they are not averaged.
train_stats["policy/advantages"] = torch.flatten(train_stats["policy/advantages"]).unsqueeze(0)
train_stats["policy/advantages"] = torch.nan_to_num(train_stats["policy/advantages"], WANDB_PADDING)
train_stats["policy/ratio"] = torch.flatten(train_stats["policy/ratio"]).unsqueeze(0)
stats = self.record_step_stats(
scores=scores,
logprobs=all_logprobs,
ref_logprobs=ref_logprobs,
non_score_reward=non_score_reward,
train_stats=train_stats,
kl_coef=self.kl_ctl.value,
masks=masks,
queries=queries,
responses=responses,
kls=kls,
)
# Gather/Reduce stats from all processes
if self.is_distributed:
stats = self.gather_stats(stats)
stats = stats_to_np(stats)
timing["time/ppo/calc_stats"] = time.time() - t
stats["ppo/learning_rate"] = self.optimizer.param_groups[0]["lr"]
# Update the KL control - multiply the batch_size by the number of processes
self.kl_ctl.update(
stats["objective/kl"],
self.config.batch_size * self.accelerator.num_processes,
)
# Log the total ppo time
timing["time/ppo/total"] = time.time() - t0
stats.update(timing)
# post-process stats for tensorboard and other loggers
if self.config.log_with != "wandb":
stats = convert_to_scalar(stats)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return stats
def _early_stop(self, policykl):
r"""
Handles the early stopping logic. If the policy KL is greater than the target KL, then the gradient is zeroed and
the optimization step is skipped.
This also handles the multi-gpu case where the policy KL is averaged across all processes.
Args:
            policykl (`torch.Tensor`):
                The policy KL divergence
Returns:
`bool`: whether to early stop or not
"""
early_stop = False
if not self.config.early_stopping:
return early_stop
if not self.is_distributed and policykl > 1.5 * self.config.target_kl:
self.optimizer.zero_grad()
early_stop = True
elif self.is_distributed:
import torch.distributed as dist
# Wait for all processes to finish
dist.barrier()
# all gather the policykl
dist.all_reduce(policykl, dist.ReduceOp.SUM)
policykl /= self.accelerator.num_processes
if policykl > 1.5 * self.config.target_kl:
self.optimizer.zero_grad()
early_stop = True
return early_stop
def gather_stats(self, stats):
"""
Gather stats from all processes. Useful in the context of distributed training.
Args:
stats (dict[str, Any]):
a dictionary of stats to be gathered. The stats should contain torch tensors.
Returns:
`dict[str, Any]`: A dictionary of stats with the tensors gathered.
"""
import torch.distributed as dist
# Wait for all processes to finish
dist.barrier()
for k, v in stats.items():
if isinstance(v, torch.Tensor):
dist.all_reduce(v.to(self.accelerator.device), dist.ReduceOp.SUM)
v /= self.accelerator.num_processes
stats[k] = v
return stats
def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor):
if self.is_encoder_decoder:
input_data = self.data_collator(
[{"input_ids": q, "attention_mask": torch.ones_like(q)} for q in queries]
).to(self.current_device)
decoder_inputs = self.data_collator(
[{"input_ids": r, "attention_mask": torch.ones_like(r)} for r in responses]
).to(self.current_device)
input_data["decoder_input_ids"] = decoder_inputs["input_ids"]
input_data["decoder_attention_mask"] = decoder_inputs["attention_mask"]
else:
input_ids = [torch.cat([q, r]) for q, r in zip(queries, responses)]
input_data = self.data_collator(
[{"input_ids": ids, "attention_mask": torch.ones_like(ids)} for ids in input_ids]
).to(self.current_device)
input_data.pop("labels", None) # we don't want to compute LM losses
return input_data
@PPODecorators.empty_device_cache()
def batched_forward_pass(
self,
model: PreTrainedModelWrapper,
queries: torch.Tensor,
responses: torch.Tensor,
model_inputs: dict,
return_logits: bool = False,
response_masks: Optional[torch.Tensor] = None,
):
"""
Calculate model outputs in multiple batches.
Args:
queries (`torch.LongTensor`):
List of tensors containing the encoded queries, shape (`batch_size`, `query_length`)
responses (`torch.LongTensor`):
List of tensors containing the encoded responses, shape (`batch_size`, `response_length`)
return_logits (`bool`, *optional*, defaults to `False`):
Whether to return all_logits. Set to `False` if logits are not needed to reduce memory consumption.
        Returns:
            (tuple):
                - all_logprobs (`torch.FloatTensor`): Log probabilities of the responses,
                    shape (`batch_size`, `response_length`)
                - all_logits (`torch.FloatTensor` or `None`): Logits of the model if `return_logits=True`, else `None`,
                    shape (`batch_size`, `response_length`, `vocab_size`)
                - all_values (`torch.FloatTensor`): Values of the responses, shape (`batch_size`, `response_length`)
                - all_masks (`torch.LongTensor`): Masks of the response tokens, shape (`batch_size`, `response_length`)
"""
bs = len(queries)
fbs = self.config.mini_batch_size
all_logprobs = []
all_logits = []
all_masks = []
all_values = []
model.eval()
for i in range(math.ceil(bs / fbs)):
input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
query_batch = queries[i * fbs : (i + 1) * fbs]
response_batch = responses[i * fbs : (i + 1) * fbs]
if response_masks is not None:
response_masks_batch = response_masks[i * fbs : (i + 1) * fbs]
logits, _, values = model(**input_kwargs)
if self.is_encoder_decoder:
input_ids = input_kwargs["decoder_input_ids"]
attention_mask = input_kwargs["decoder_attention_mask"]
else:
input_ids = input_kwargs["input_ids"]
attention_mask = input_kwargs["attention_mask"]
logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
masks = torch.zeros_like(attention_mask)
masks[:, :-1] = attention_mask[:, 1:]
for j in range(len(query_batch)):
if self.is_encoder_decoder:
# Decoder sentence starts always in the index 1 after padding in the Enc-Dec Models
start = 1
end = attention_mask[j, :].sum() - 1
else:
start = len(query_batch[j]) - 1 # logprobs starts from the second query token
if attention_mask[j, 0] == 0: # offset left padding
start += attention_mask[j, :].nonzero()[0]
end = start + len(response_batch[j])
if response_masks is not None:
response_masks_batch[j] = torch.cat(
(torch.zeros_like(query_batch[j]), response_masks_batch[j])
)[1:]
masks[j, :start] = 0
masks[j, end:] = 0
if response_masks is not None:
masks[j, start:end] = masks[j, start:end] * response_masks_batch[j][start:end]
if return_logits:
all_logits.append(logits)
else:
del logits
all_values.append(values)
all_logprobs.append(logprobs)
all_masks.append(masks)
return (
torch.cat(all_logprobs),
torch.cat(all_logits)[:, :-1] if return_logits else None,
torch.cat(all_values)[:, :-1],
torch.cat(all_masks)[:, :-1],
)
@PPODecorators.empty_device_cache()
def train_minibatch(
self,
old_logprobs: torch.FloatTensor,
values: torch.FloatTensor,
logprobs: torch.FloatTensor,
logits: torch.FloatTensor,
vpreds: torch.FloatTensor,
mask: torch.LongTensor,
advantages: torch.FloatTensor,
returns: torch.FloatTensor,
):
"""
Train one PPO minibatch
Args:
            old_logprobs (`torch.FloatTensor`):
                Log probabilities of the model at sampling time, shape [mini_batch_size, response_length]
            values (`torch.FloatTensor`):
                Values of the value head at sampling time, shape [mini_batch_size, response_length]
            logprobs (`torch.FloatTensor`):
                Current log probabilities of the model, shape [mini_batch_size, response_length]
            logits (`torch.FloatTensor`):
                Current logits of the model, shape [mini_batch_size, response_length, vocab_size]
            vpreds (`torch.FloatTensor`):
                Current values of the value head, shape [mini_batch_size, response_length]
            mask (`torch.LongTensor`):
                Mask of the response tokens, shape [mini_batch_size, response_length]
            advantages (`torch.FloatTensor`):
                Advantages computed for the minibatch, shape [mini_batch_size, response_length]
            returns (`torch.FloatTensor`):
                Returns computed for the minibatch, shape [mini_batch_size, response_length]
Returns:
train_stats (dict[str, `torch.Tensor`]):
Dictionary of training statistics
"""
self.model.train()
loss_p, loss_v, train_stats = self.loss(
old_logprobs, values, logits, vpreds, logprobs, mask, advantages, returns
)
loss = loss_p + loss_v
self.accelerator.backward(loss)
if self.config.max_grad_norm is not None:
if self.accelerator.sync_gradients:
self.accelerator.clip_grad_norm_(self.model_params, self.config.max_grad_norm)
self.optimizer.step()
# we call optimizer.zero_grad() every time and let `accelerator` handle accumulation
# see https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation#the-finished-code
self.optimizer.zero_grad()
return train_stats
def compute_rewards(
self,
scores: torch.FloatTensor,
logprobs: torch.FloatTensor,
ref_logprobs: torch.FloatTensor,
masks: torch.LongTensor,
):
"""
Compute per token rewards from scores and KL-penalty.
Args:
scores (`torch.FloatTensor`):
Scores from the reward model, shape (`batch_size`)
logprobs (`torch.FloatTensor`):
Log probabilities of the model, shape (`batch_size`, `response_length`)
ref_logprobs (`torch.FloatTensor`):
Log probabilities of the reference model, shape (`batch_size`, `response_length`)
Returns:
`torch.FloatTensor`: Per token rewards, shape (`batch_size`, `response_length`)
`torch.FloatTensor`: Non score rewards, shape (`batch_size`, `response_length`)
`torch.FloatTensor`: KL penalty, shape (`batch_size`, `response_length`)
"""
rewards, non_score_rewards, kls = [], [], []
for score, logprob, ref_logprob, mask in zip(scores, logprobs, ref_logprobs, masks):
# compute KL penalty (from difference in logprobs)
kl = self._kl_penalty(logprob, ref_logprob)
kls.append(kl)
non_score_reward = -self.kl_ctl.value * kl
non_score_rewards.append(non_score_reward)
reward = non_score_reward.clone()
last_non_masked_index = mask.nonzero()[-1]
# reward is preference model score + KL penalty
reward[last_non_masked_index] += score
rewards.append(reward)
return torch.stack(rewards), torch.stack(non_score_rewards), torch.stack(kls)
def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor:
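        # Supported estimators: "kl" (plain log-prob difference), "abs" (absolute difference),
        # "mse" (half squared difference) and "full" (exact KL over the full vocabulary distributions).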
if self.config.kl_penalty == "kl":
return logprob - ref_logprob
if self.config.kl_penalty == "abs":
return (logprob - ref_logprob).abs()
if self.config.kl_penalty == "mse":
return 0.5 * (logprob - ref_logprob).square()
if self.config.kl_penalty == "full":
            # The argument order is flipped due to this PyTorch issue: https://github.com/pytorch/pytorch/issues/57459
return F.kl_div(ref_logprob, logprob, log_target=True, reduction="none").sum(-1)
raise NotImplementedError
def compute_advantages(
self,
values: torch.FloatTensor,
rewards: torch.FloatTensor,
mask: torch.FloatTensor,
):
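        # Generalized Advantage Estimation (GAE), computed right to left over the response tokens:
        #   delta_t = r_t + gamma * V_{t+1} - V_t
        #   A_t     = delta_t + gamma * lam * A_{t+1}
        # Returns are R_t = A_t + V_t; advantages are whitened before being returned for the loss.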
lastgaelam = 0
advantages_reversed = []
gen_len = rewards.shape[-1]
values = values * mask
rewards = rewards * mask
if self.config.whiten_rewards:
rewards = masked_whiten(rewards, mask, shift_mean=False)
for t in reversed(range(gen_len)):
nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0
delta = rewards[:, t] + self.config.gamma * nextvalues - values[:, t]
lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1)
returns = advantages + values
advantages = masked_whiten(advantages, mask)
advantages = advantages.detach()
return values, advantages, returns
def loss(
self,
old_logprobs: torch.FloatTensor,
values: torch.FloatTensor,
logits: torch.FloatTensor,
vpreds: torch.FloatTensor,
logprobs: torch.FloatTensor,
mask: torch.LongTensor,
advantages: torch.FloatTensor,
returns: torch.FloatTensor,
):
"""
Calculate policy and value losses.
Args:
            old_logprobs (`torch.FloatTensor`):
                Log probabilities of the model at sampling time, shape (`batch_size`, `response_length`)
            values (`torch.FloatTensor`):
                Values of the value head at sampling time, shape (`batch_size`, `response_length`)
            logits (`torch.FloatTensor`):
                Logits of the model, shape (`batch_size`, `response_length`, `vocab_size`)
            vpreds (`torch.FloatTensor`):
                Current values of the value head, shape (`batch_size`, `response_length`)
            logprobs (`torch.FloatTensor`):
                Current log probabilities of the model, shape (`batch_size`, `response_length`)
            mask (`torch.LongTensor`):
                Mask of the response tokens, shape (`batch_size`, `response_length`)
            advantages (`torch.FloatTensor`):
                Advantages computed with GAE, shape (`batch_size`, `response_length`)
            returns (`torch.FloatTensor`):
                Returns (advantages + values), shape (`batch_size`, `response_length`)
"""
vpredclipped = clip_by_value(
vpreds,
values - self.config.cliprange_value,
values + self.config.cliprange_value,
)
vf_losses1 = (vpreds - returns) ** 2
vf_losses2 = (vpredclipped - returns) ** 2
vf_loss = 0.5 * masked_mean(torch.max(vf_losses1, vf_losses2), mask)
vf_clipfrac = masked_mean(torch.gt(vf_losses2, vf_losses1).float(), mask)
ratio = torch.exp(logprobs - old_logprobs)
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - self.config.cliprange, 1.0 + self.config.cliprange)
pg_loss = masked_mean(torch.max(pg_losses, pg_losses2), mask)
pg_clipfrac = masked_mean(torch.gt(pg_losses2, pg_losses).float(), mask)
loss = pg_loss + self.config.vf_coef * vf_loss
avg_ratio = masked_mean(ratio, mask).item()
if avg_ratio > self.config.ratio_threshold:
warnings.warn(
f"The average ratio of batch ({avg_ratio:.2f}) exceeds threshold {self.config.ratio_threshold:.2f}. Skipping batch."
)
pg_loss = pg_loss * 0.0
vf_loss = vf_loss * 0.0
loss = loss * 0.0
entropy = masked_mean(entropy_from_logits(logits), mask)
approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)
policykl = masked_mean(old_logprobs - logprobs, mask)
return_mean, return_var = masked_mean(returns, mask), masked_var(returns, mask)
value_mean, value_var = masked_mean(values, mask), masked_var(values, mask)
stats = dict(
loss=dict(policy=pg_loss.detach(), value=vf_loss.detach(), total=loss.detach()),
policy=dict(
entropy=entropy.detach(),
approxkl=approxkl.detach(),
policykl=policykl.detach(),
clipfrac=pg_clipfrac.detach(),
advantages=advantages.detach(),
advantages_mean=masked_mean(advantages, mask).detach(),
ratio=ratio.detach(),
),
returns=dict(mean=return_mean.detach(), var=return_var.detach()),
val=dict(
vpred=masked_mean(vpreds, mask).detach(),
error=masked_mean((vpreds - returns) ** 2, mask).detach(),
clipfrac=vf_clipfrac.detach(),
mean=value_mean.detach(),
var=value_var.detach(),
),
)
return pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats)
def record_step_stats(self, kl_coef: float, **data):
"""
Record training step statistics.
Args:
kl_coef (`float`):
KL coefficient
data (`dict`):
Dictionary of training step data
Returns:
stats (`dict`):
Dictionary of training step statistics
"""
mask = data.pop("masks")
kls = data.pop("kls")
        kl_list = (kls * mask).sum(axis=-1)
mean_kl = kl_list.mean()
mean_entropy = (-data["logprobs"] * mask).sum(axis=-1).mean()
mean_non_score_reward = masked_mean(
data["non_score_reward"], mask
) # non_score_reward is size `batch_size`, `response_length`
mean_scores = data["scores"].mean() # scores is size `batch_size`
std_scores = data["scores"].std()
if mean_kl.item() < -1.0:
# warn users
warnings.warn(
f"KL divergence is starting to become negative: {mean_kl.item():.2f} - this might be a precursor for failed training."
" sometimes this happens because the generation kwargs are not correctly set. Please make sure"
" that the generation kwargs are set correctly, or review your training hyperparameters."
)
stats = {
"objective/kl": mean_kl,
"objective/kl_dist": kl_list,
"objective/logprobs": data["logprobs"],
"objective/ref_logprobs": data["ref_logprobs"],
"objective/kl_coef": kl_coef,
"objective/entropy": mean_entropy,
"ppo/mean_non_score_reward": mean_non_score_reward,
"ppo/mean_scores": mean_scores,
"ppo/std_scores": std_scores,
}
# Log text properties
query_lens = torch.tensor([len(query) for query in data["queries"]], dtype=torch.float)
response_lens = torch.tensor([len(response) for response in data["responses"]], dtype=torch.float)
stats["tokens/queries_len_mean"] = torch.mean(query_lens).cpu().numpy().item()
stats["tokens/queries_len_std"] = torch.std(query_lens).cpu().numpy().item()
stats["tokens/queries_dist"] = query_lens.cpu().numpy()
stats["tokens/responses_len_mean"] = torch.mean(response_lens).cpu().numpy().item()
stats["tokens/responses_len_std"] = torch.std(response_lens).cpu().numpy().item()
stats["tokens/responses_dist"] = response_lens.cpu().numpy()
for k, v in data["train_stats"].items():
stats[f"ppo/{k}"] = torch.mean(v, axis=0)
stats["ppo/val/var_explained"] = 1 - stats["ppo/val/error"] / stats["ppo/returns/var"]
return stats
def log_stats(
self,
stats: dict,
batch: dict,
rewards: List[torch.FloatTensor],
columns_to_log: List[str] = ["query", "response"],
):
"""
A function that logs all the training stats. Call it at the end of each epoch.
Args:
stats (dict[str, Any]):
A dictionary of training stats.
batch (dict[str, Any]):
A dictionary of batch data, this contains the queries and responses.
rewards (`List[torch.FloatTensor]`):
A tensor of rewards.
"""
# all gather stats
if not isinstance(rewards, torch.Tensor):
rewards = torch.tensor(rewards).to(self.current_device)
rewards = self.accelerator.gather(rewards).flatten()
if self.config.log_with == "wandb":
import wandb
if any([column_to_log not in batch.keys() for column_to_log in columns_to_log]):
raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.")
batch_list = [batch[column_to_log] for column_to_log in columns_to_log]
if self.is_distributed:
gathered_batch_list = []
for b in batch_list:
flattened = gather_object(b)
gathered_batch_list.append(flattened)
batch_list = gathered_batch_list
# Log only if we are in the main process
if self.accelerator.is_main_process:
logs = {}
# Log stats
if "query" not in batch.keys() and "response" not in batch.keys():
# warn the user that the game logs will not be logged
warnings.warn(
"The game logs will not be logged because the batch does not contain the keys 'query' and "
"'response'. "
)
elif self.config.log_with == "wandb":
table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())]
logs.update({"game_log": wandb.Table(columns=[*columns_to_log, "reward"], rows=table_rows)})
logs.update(stats)
# manually cast in fp32 for bf16 torch tensors
for k, v in logs.items():
if isinstance(v, torch.Tensor) and v.dtype == torch.bfloat16:
logs[k] = v.float()
logs["env/reward_mean"] = torch.mean(rewards).cpu().numpy().item()
logs["env/reward_std"] = torch.std(rewards).cpu().numpy().item()
logs["env/reward_dist"] = rewards.cpu().numpy()
if self.config.log_with == "tensorboard":
# update the current step
self.current_step += 1
self.accelerator.log(
logs,
step=self.current_step if self.config.log_with == "tensorboard" else None,
)
def create_model_card(self, path: str, model_name: Optional[str] = "TRL Model") -> None:
"""Creates and saves a model card for a TRL model.
Args:
path (`str`): The path to save the model card to.
model_name (`str`, *optional*): The name of the model, defaults to `TRL Model`.
"""
try:
user = whoami()["name"]
# handle the offline case
except: # noqa
warnings.warn("Cannot retrieve user information assuming you are running in offline mode.")
return
if not os.path.exists(path):
os.makedirs(path)
model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}")
with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f:
f.write(model_card_content)
def _save_pretrained(self, save_directory: str) -> None:
self.accelerator.unwrap_model(self.model).save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
self.create_model_card(save_directory)
def _show_tokens(self, tokens, masks):
from rich import print
from rich.text import Text
text = Text()
for i, (token, mask) in enumerate(zip(tokens, masks)):
if mask == 1:
text.append(self.tokenizer.decode(token.item()), style="black on deep_sky_blue1")
text.append(" ")
else:
text.append(self.tokenizer.decode(token.item()), style="black on cyan3")
text.append(" ")
print(text)
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
config_kwargs = deepspeed_plugin.deepspeed_config
if model is not None:
if hasattr(model, "config"):
hidden_size = (
max(model.config.hidden_sizes)
if getattr(model.config, "hidden_sizes", None)
else getattr(model.config, "hidden_size", None)
)
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
config_kwargs.update(
{
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
}
)
# If ZeRO-3 is used, we shard both the active and reference model.
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
if config_kwargs["zero_optimization"]["stage"] != 3:
config_kwargs["zero_optimization"]["stage"] = 0
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
model.eval()
return model
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/reward_config.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
from transformers import TrainingArguments
@dataclass
class RewardConfig(TrainingArguments):
"""
RewardConfig collects all training arguments related to the [`RewardTrainer`] class.
Using [`HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
max_length (`int`, *optional*, defaults to `None`):
The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.
gradient_checkpointing (`bool`, *optional*, defaults to `True`):
            If True, use gradient checkpointing to save memory at the expense of a slower backward pass.
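    Example:
        A minimal sketch - the output directory, sequence length, and batch size are illustrative placeholders:
        ```python
        from trl import RewardConfig
        reward_config = RewardConfig(
            output_dir="./reward_model",
            max_length=512,
            per_device_train_batch_size=8,
        )
        ```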
"""
max_length: Optional[int] = None
"""The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator."""
gradient_checkpointing: Optional[bool] = True
"""If True, use gradient checkpointing to save memory at the expense of slower backward pass."""
gradient_checkpointing_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the gradient checkpointing function."""
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/dpo_trainer.py | # DPO Authors: Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn 2023
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import random
import warnings
from collections import defaultdict
from contextlib import contextmanager, nullcontext
from copy import deepcopy
from functools import wraps
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from accelerate.utils import is_deepspeed_available, tqdm
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
AutoModelForCausalLM,
DataCollator,
PreTrainedModel,
PreTrainedTokenizerBase,
Trainer,
TrainingArguments,
)
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalLoopOutput
from ..import_utils import is_peft_available, is_wandb_available
from ..models import PreTrainedModelWrapper, create_reference_model
from .utils import (
DPODataCollatorWithPadding,
disable_dropout_in_model,
pad_to_length,
peft_module_casting_to_bf16,
trl_sanitze_kwargs_for_tagging,
)
if is_peft_available():
from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training
if is_wandb_available():
import wandb
if is_deepspeed_available():
import deepspeed
class DPOTrainer(Trainer):
r"""
Initialize DPOTrainer.
Args:
model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a causal language modelling head. Used for implicit reward computation and loss. If no
reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
beta (`float`, defaults to 0.1):
The beta factor in DPO loss. Higher beta means less divergence from the initial policy. For the IPO loss, beta is the regularization parameter denoted by tau in the paper.
label_smoothing (`float`, defaults to 0):
The robust DPO label smoothing parameter from the [cDPO](https://ericmitchell.ai/cdpo.pdf) report that should be between 0 and 0.5.
loss_type (`str`, defaults to `"sigmoid"`):
The type of DPO loss to use. Either `"sigmoid"` the default DPO loss,`"hinge"` loss from [SLiC](https://arxiv.org/abs/2305.10425) paper, `"ipo"` from [IPO](https://arxiv.org/abs/2310.12036) paper, or `"kto"` from the HALOs [report](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf).
args (`transformers.TrainingArguments`):
The arguments to use for training.
data_collator (`transformers.DataCollator`):
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
label_pad_token_id (`int`, defaults to `-100`):
The label pad token id. This argument is required if you want to use the default data collator.
padding_value (`int`, defaults to `0`):
The padding value if it is different to the tokenizer's pad_token_id.
truncation_mode (`str`, defaults to `keep_end`):
The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.
train_dataset (`datasets.Dataset`):
The dataset to use for training.
eval_dataset (`datasets.Dataset`):
The dataset to use for evaluation.
tokenizer (`transformers.PreTrainedTokenizerBase`):
The tokenizer to use for training. This argument is required if you want to use the default data collator.
model_init (`Callable[[], transformers.PreTrainedModel]`):
The model initializer to use for training. If None is specified, the default model initializer will be used.
callbacks (`List[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
max_length (`int`, defaults to `None`):
The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.
max_prompt_length (`int`, defaults to `None`):
The maximum length of the prompt. This argument is required if you want to use the default data collator.
max_target_length (`int`, defaults to `None`):
The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.
peft_config (`Dict`, defaults to `None`):
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`):
If no model is provided, we need to know if the model_init returns an encoder-decoder.
disable_dropout (`bool`, defaults to `True`):
Whether or not to disable dropouts in `model` and `ref_model`.
generate_during_eval (`bool`, defaults to `False`):
Whether to sample and log generations during evaluation step.
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
The function to use to compute the metrics. Must take a `EvalPrediction` and return
a dictionary string to metric values.
precompute_ref_log_probs (`bool`, defaults to `False`):
            Flag to precompute reference model log probabilities for training and evaluation datasets. This is useful if you want to train
without the reference model and reduce the total GPU memory needed.
model_init_kwargs: (`Optional[Dict]`, *optional*):
Dict of Optional kwargs to pass when instantiating the model from a string
ref_model_init_kwargs: (`Optional[Dict]`, *optional*):
Dict of Optional kwargs to pass when instantiating the ref model from a string
model_adapter_name (`str`, defaults to `None`):
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
ref_adapter_name (`str`, defaults to `None`):
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
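    Example:
        A minimal sketch - the model name, toy preference dataset, and hyperparameters are illustrative
        placeholders; the dataset is expected to provide `prompt`, `chosen` and `rejected` columns:
        ```python
        from datasets import Dataset
        from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
        from trl import DPOTrainer
        model = AutoModelForCausalLM.from_pretrained("gpt2")
        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        tokenizer.pad_token = tokenizer.eos_token  # gpt2 has no pad token by default
        train_dataset = Dataset.from_dict(
            {"prompt": ["Q: What is 2 + 2? A:"], "chosen": [" 4"], "rejected": [" 5"]}
        )
        training_args = TrainingArguments(output_dir="./dpo_model", per_device_train_batch_size=1, max_steps=10)
        dpo_trainer = DPOTrainer(
            model=model,
            ref_model=None,  # a frozen copy of `model` is created automatically when None is passed
            args=training_args,
            beta=0.1,
            train_dataset=train_dataset,
            tokenizer=tokenizer,
            max_length=128,
            max_prompt_length=64,
        )
        dpo_trainer.train()
        ```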
"""
_tag_names = ["trl", "dpo"]
def __init__(
self,
model: Union[PreTrainedModel, nn.Module, str] = None,
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
beta: float = 0.1,
label_smoothing: float = 0,
loss_type: Literal["sigmoid", "hinge", "ipo", "kto"] = "sigmoid",
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
label_pad_token_id: int = -100,
padding_value: int = None,
truncation_mode: str = "keep_end",
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
max_length: Optional[int] = None,
max_prompt_length: Optional[int] = None,
max_target_length: Optional[int] = None,
peft_config: Optional[Dict] = None,
is_encoder_decoder: Optional[bool] = None,
disable_dropout: bool = True,
generate_during_eval: bool = False,
compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None,
precompute_ref_log_probs: bool = False,
model_init_kwargs: Optional[Dict] = None,
ref_model_init_kwargs: Optional[Dict] = None,
model_adapter_name: str = None,
ref_adapter_name: str = None,
):
if model_init_kwargs is None:
model_init_kwargs = {}
elif not isinstance(model, str):
raise ValueError("You passed model_kwargs to the DPOTrainer. But your model is already instantiated.")
if ref_model_init_kwargs is None:
ref_model_init_kwargs = {}
elif not isinstance(ref_model, str):
raise ValueError(
"You passed ref_model_kwargs to the DPOTrainer. But your ref_model is already instantiated."
)
if isinstance(model, str):
warnings.warn(
"You passed a model_id to the DPOTrainer. This will automatically create an "
"`AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you."
)
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
if isinstance(ref_model, str):
warnings.warn(
"You passed a ref model_id to the DPOTrainer. This will automatically create an "
"`AutoModelForCausalLM`"
)
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
# has been called in order to properly call autocast if needed.
self._peft_has_been_casted_to_bf16 = False
if not is_peft_available() and peft_config is not None:
raise ValueError(
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
)
elif is_peft_available() and peft_config is not None:
# if model is a peft model and we have a peft_config, we merge and unload it first
if isinstance(model, PeftModel):
model = model.merge_and_unload()
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
_support_gc_kwargs = hasattr(
args, "gradient_checkpointing_kwargs"
) and "gradient_checkpointing_kwargs" in list(
inspect.signature(prepare_model_for_kbit_training).parameters
)
                prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
                if _support_gc_kwargs:
                    prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
                model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
elif getattr(args, "gradient_checkpointing", False):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
# get peft model with the given config
model = get_peft_model(model, peft_config)
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
peft_module_casting_to_bf16(model)
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
self._peft_has_been_casted_to_bf16 = True
        # For models that use gradient_checkpointing, we need to attach a hook that enables input
# to explicitly have `requires_grad=True`, otherwise training will either silently
# fail or completely fail.
elif getattr(args, "gradient_checkpointing", False):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
if generate_during_eval and not is_wandb_available():
raise ValueError(
"`generate_during_eval=True` requires Weights and Biases to be installed."
" Please install `wandb` to resolve."
)
if model is not None:
self.is_encoder_decoder = model.config.is_encoder_decoder
elif is_encoder_decoder is None:
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
else:
self.is_encoder_decoder = is_encoder_decoder
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
self.model_adapter_name = model_adapter_name
self.ref_adapter_name = ref_adapter_name
if ref_model:
self.ref_model = ref_model
elif self.is_peft_model or precompute_ref_log_probs:
# The `model` with adapters turned off will be used as the reference model
self.ref_model = None
else:
self.ref_model = create_reference_model(model)
if tokenizer is None:
raise ValueError("tokenizer must be specified to tokenize a DPO dataset.")
if max_length is None:
warnings.warn(
"`max_length` is not set in the DPOTrainer's init;"
" it will default to `512`, but you should set it yourself in the future.",
UserWarning,
)
max_length = 512
if max_prompt_length is None:
warnings.warn(
"`max_prompt_length` is not set in the DPOTrainer's init;"
" it will default to `128`, but you should set it yourself in the future.",
UserWarning,
)
max_prompt_length = 128
if max_target_length is None and self.is_encoder_decoder:
warnings.warn(
"When using an encoder-decoder architecture, you should set `max_target_length` in the DPOTrainer's init;"
" it will default to `128`, but you should set it yourself in the future.",
UserWarning,
)
max_target_length = 128
if data_collator is None:
data_collator = DPODataCollatorWithPadding(
pad_token_id=tokenizer.pad_token_id,
label_pad_token_id=label_pad_token_id,
is_encoder_decoder=self.is_encoder_decoder,
)
if args.remove_unused_columns:
args.remove_unused_columns = False
# warn users
warnings.warn(
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments;"
" we have set it for you, but you should do it yourself in the future.",
UserWarning,
)
self.use_dpo_data_collator = True
else:
self.use_dpo_data_collator = False
if disable_dropout:
disable_dropout_in_model(model)
if self.ref_model is not None:
disable_dropout_in_model(self.ref_model)
self.max_length = max_length
self.generate_during_eval = generate_during_eval
self.label_pad_token_id = label_pad_token_id
self.padding_value = padding_value if padding_value is not None else tokenizer.pad_token_id
self.max_prompt_length = max_prompt_length
self.truncation_mode = truncation_mode
self.max_target_length = max_target_length
self.tokenizer = tokenizer
self.precompute_ref_log_probs = precompute_ref_log_probs
# Since ref_logs are precomputed on the first call to get_train/eval_dataloader
# keep track of first called to avoid computation of future calls
self._precomputed_train_ref_log_probs = False
self._precomputed_eval_ref_log_probs = False
if loss_type in ["hinge", "ipo", "kto_pair"] and label_smoothing > 0:
warnings.warn(
"You are using a loss type that does not support label smoothing. Ignoring label_smoothing parameter."
)
self.beta = beta
self.label_smoothing = label_smoothing
self.loss_type = loss_type
self._stored_metrics = defaultdict(lambda: defaultdict(list))
# tokenize the dataset
train_dataset = train_dataset.map(self.tokenize_row)
if eval_dataset is not None:
eval_dataset = eval_dataset.map(self.tokenize_row)
super().__init__(
model=model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
model_init=model_init,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
if not hasattr(self, "accelerator"):
raise AttributeError(
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
)
# Deepspeed Zero-3 does not support precompute_ref_log_probs
if self.is_deepspeed_enabled:
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
raise ValueError(
"You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
)
if self.ref_model is None:
if not (self.is_peft_model or self.precompute_ref_log_probs):
raise ValueError(
"No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
)
else:
if self.is_deepspeed_enabled:
self.ref_model = self._prepare_deepspeed(self.ref_model)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)
if model is not None:
if hasattr(model, "config"):
hidden_size = (
max(model.config.hidden_sizes)
if getattr(model.config, "hidden_sizes", None)
else getattr(model.config, "hidden_size", None)
)
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
config_kwargs.update(
{
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
}
)
# If ZeRO-3 is used, we shard both the active and reference model.
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
if config_kwargs["zero_optimization"]["stage"] != 3:
config_kwargs["zero_optimization"]["stage"] = 0
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
model.eval()
return model
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
Subclass of `transformers.Trainer.get_train_dataloader` to precompute `ref_log_probs`.
"""
if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
dataloader_params = {
"batch_size": self.args.per_device_train_batch_size,
"collate_fn": self.data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"shuffle": False,
}
# prepare dataloader
data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
reference_chosen_logps = []
reference_rejected_logps = []
for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
reference_chosen_logp, reference_rejected_logp = self.compute_reference_log_probs(padded_batch)
reference_chosen_logp, reference_rejected_logp = self.accelerator.gather_for_metrics(
(reference_chosen_logp, reference_rejected_logp)
)
reference_chosen_logps.append(reference_chosen_logp.cpu())
reference_rejected_logps.append(reference_rejected_logp.cpu())
all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy()
all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy()
self.train_dataset = self.train_dataset.add_column(
name="reference_chosen_logps", column=all_reference_chosen_logps
)
self.train_dataset = self.train_dataset.add_column(
name="reference_rejected_logps", column=all_reference_rejected_logps
)
self._precomputed_train_ref_log_probs = True
return super().get_train_dataloader()
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation [`~torch.utils.data.DataLoader`].
Subclass of `transformers.Trainer.get_eval_dataloader` to precompute `ref_log_probs`.
Args:
eval_dataset (`torch.utils.data.Dataset`, *optional*):
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
by the `model.forward()` method are automatically removed. It must implement `__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
dataloader_params = {
"batch_size": self.args.per_device_eval_batch_size,
"collate_fn": self.data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"shuffle": False,
}
# prepare dataloader
data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
reference_chosen_logps = []
reference_rejected_logps = []
for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
reference_chosen_logp, reference_rejected_logp = self.compute_reference_log_probs(padded_batch)
reference_chosen_logp, reference_rejected_logp = self.accelerator.gather_for_metrics(
(reference_chosen_logp, reference_rejected_logp)
)
reference_chosen_logps.append(reference_chosen_logp.cpu())
reference_rejected_logps.append(reference_rejected_logp.cpu())
all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy()
all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy()
eval_dataset = eval_dataset.add_column(name="reference_chosen_logps", column=all_reference_chosen_logps)
eval_dataset = eval_dataset.add_column(
name="reference_rejected_logps", column=all_reference_rejected_logps
)
# Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs
if self.eval_dataset is not None:
self.eval_dataset = eval_dataset
self._precomputed_eval_ref_log_probs = True
return super().get_eval_dataloader(eval_dataset=eval_dataset)
def build_tokenized_answer(self, prompt, answer):
"""
The Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`.
It does, however, ensure that `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`.
Reference:
https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
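For illustration (token ids below are made up, not from any real tokenizer): with
`enc("Q: ") == [100, 42]` and `enc("Q: A") == [100, 43, 7]` (the boundary tokens merged),
tokenizing the answer `"A"` on its own would not reproduce `[43, 7]`. This helper therefore
slices the tokenization of the full string, and shifts the boundary back by one token further
down when the last prompt token changed.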
"""
full_tokenized = self.tokenizer(prompt + answer, add_special_tokens=False)
prompt_input_ids = self.tokenizer(prompt, add_special_tokens=False)["input_ids"]
answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]
# Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])
# Prepare input tokens for token by token comparison
full_input_ids = np.array(full_tokenized["input_ids"])
if len(full_input_ids) != len(full_concat_input_ids):
raise ValueError("The combined length of the prompt and answer input ids should match the length of the full tokenized sequence.")
# On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
# can be merged together when tokenizing prompt+answer. This could result
# on the last token from the prompt being different when tokenized on its own
# vs when done as prompt+answer.
response_token_ids_start_idx = len(prompt_input_ids)
# If the prompt tokenized on its own differs from the prompt portion of the tokenized
# prompt+answer, the last prompt token has changed due to merging.
if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
response_token_ids_start_idx -= 1
prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]
if len(prompt_input_ids) != len(prompt_attention_mask):
raise ValueError("Prompt input ids and attention mask should have the same length.")
answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]
return dict(
prompt_input_ids=prompt_input_ids,
prompt_attention_mask=prompt_attention_mask,
input_ids=answer_input_ids,
attention_mask=answer_attention_mask,
)
def tokenize_row(self, feature, model: Union[PreTrainedModel, nn.Module] = None) -> Dict:
"""Tokenize a single row from a DPO specific dataset.
At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
in case the prompt + chosen or prompt + rejected responses is/are too long. First
we truncate the prompt; if we're still too long, we truncate the chosen/rejected.
We also create the labels for the chosen/rejected responses, which are of length equal to
the sum of the length of the prompt and the chosen/rejected response, with
label_pad_token_id for the prompt tokens.
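For the decoder-only case, the resulting dict contains (values are plain lists of token ids):
`prompt_input_ids`, `prompt_attention_mask`, `chosen_input_ids`, `chosen_attention_mask`,
`chosen_labels`, `rejected_input_ids`, `rejected_attention_mask` and `rejected_labels`, where
the labels are the full prompt+response ids with the prompt positions replaced by
`label_pad_token_id`.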
"""
batch = {}
prompt = feature["prompt"]
chosen = feature["chosen"]
rejected = feature["rejected"]
if not self.is_encoder_decoder:
# Check issues below for more details
# 1. https://github.com/huggingface/trl/issues/907
# 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
# 3. https://github.com/LianjiaTech/BELLE/issues/337
if not isinstance(prompt, str):
raise ValueError(f"prompt should be a str but got {type(prompt)}")
prompt_tokens = self.tokenizer(prompt, add_special_tokens=False)
prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}
if not isinstance(chosen, str):
raise ValueError(f"chosen should be a str but got {type(chosen)}")
chosen_tokens = self.build_tokenized_answer(prompt, chosen)
if not isinstance(rejected, str):
raise ValueError(f"rejected should be a str but got {type(rejected)}")
rejected_tokens = self.build_tokenized_answer(prompt, rejected)
# Last prompt token might get merged by tokenizer and
# it should not be included for generation if that happens
prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])
chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)
for k, v in prompt_tokens.items():
prompt_tokens[k] = v[:prompt_len_input_ids]
# Make sure the chosen and rejected prompts differ by at most one token,
# and that their lengths differ by at most 1
num_diff_tokens = sum(
[a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])]
)
num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
if num_diff_tokens > 1 or num_diff_len > 1:
raise ValueError(
"Chosen and rejected prompt_input_ids might only differ on the "
"last token due to tokenizer merge ops."
)
# add BOS token to head of prompt
prompt_tokens["prompt_input_ids"] = [self.tokenizer.bos_token_id] + prompt_tokens["prompt_input_ids"]
chosen_tokens["prompt_input_ids"] = [self.tokenizer.bos_token_id] + chosen_tokens["prompt_input_ids"]
rejected_tokens["prompt_input_ids"] = [self.tokenizer.bos_token_id] + rejected_tokens["prompt_input_ids"]
prompt_tokens["prompt_attention_mask"] = [1] + prompt_tokens["prompt_attention_mask"]
chosen_tokens["prompt_attention_mask"] = [1] + chosen_tokens["prompt_attention_mask"]
rejected_tokens["prompt_attention_mask"] = [1] + rejected_tokens["prompt_attention_mask"]
# add EOS token to end of answer
chosen_tokens["input_ids"].append(self.tokenizer.eos_token_id)
chosen_tokens["attention_mask"].append(1)
rejected_tokens["input_ids"].append(self.tokenizer.eos_token_id)
rejected_tokens["attention_mask"].append(1)
longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))
# if combined sequence is too long, truncate the prompt
for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
if self.truncation_mode == "keep_start":
for k in ["prompt_input_ids", "prompt_attention_mask"]:
answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
elif self.truncation_mode == "keep_end":
for k in ["prompt_input_ids", "prompt_attention_mask"]:
answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
else:
raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
# if that's still too long, truncate the response
for answer_tokens in [chosen_tokens, rejected_tokens]:
if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
for k in ["input_ids", "attention_mask"]:
answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]
# Create labels
chosen_sequence_tokens = {
k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
}
rejected_sequence_tokens = {
k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
}
chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
self.label_pad_token_id
] * len(chosen_tokens["prompt_input_ids"])
rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
self.label_pad_token_id
] * len(rejected_tokens["prompt_input_ids"])
for k, toks in {
"chosen_": chosen_sequence_tokens,
"rejected_": rejected_sequence_tokens,
"": prompt_tokens,
}.items():
for type_key, tokens in toks.items():
if type_key == "token_type_ids":
continue
batch[f"{k}{type_key}"] = tokens
else:
chosen_tokens = self.tokenizer(
chosen, truncation=True, max_length=self.max_target_length, add_special_tokens=True
)
rejected_tokens = self.tokenizer(
rejected, truncation=True, max_length=self.max_target_length, add_special_tokens=True
)
prompt_tokens = self.tokenizer(
prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
)
batch["chosen_labels"] = chosen_tokens["input_ids"]
batch["rejected_labels"] = rejected_tokens["input_ids"]
batch["prompt_input_ids"] = prompt_tokens["input_ids"]
batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]
if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
labels=batch["rejected_labels"]
)
batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
labels=batch["chosen_labels"]
)
return batch
@contextmanager
def null_ref_context(self):
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
with self.accelerator.unwrap_model(
self.model
).disable_adapter() if self.is_peft_model and not self.ref_adapter_name else nullcontext():
if self.ref_adapter_name:
self.model.set_adapter(self.ref_adapter_name)
yield
if self.ref_adapter_name:
self.model.set_adapter(self.model_adapter_name or "default")
def compute_reference_log_probs(self, padded_batch: Dict) -> Dict:
"""Computes log probabilities of the reference model for a single padded batch of a DPO specific dataset."""
compute_ref_context_manager = torch.cuda.amp.autocast if self._peft_has_been_casted_to_bf16 else nullcontext
# compute reference logps
with torch.no_grad(), compute_ref_context_manager():
if self.ref_model is None:
with self.null_ref_context():
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
) = self.concatenated_forward(self.model, padded_batch)
else:
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
) = self.concatenated_forward(self.ref_model, padded_batch)
return reference_chosen_logps, reference_rejected_logps
@staticmethod
def concatenated_inputs(
batch: Dict[str, Union[List, torch.LongTensor]],
is_encoder_decoder: bool = False,
label_pad_token_id: int = -100,
padding_value: int = 0,
device: Optional[torch.device] = None,
) -> Dict[str, torch.LongTensor]:
"""Concatenate the chosen and rejected inputs into a single tensor.
Args:
batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
is_encoder_decoder: Whether the model is an encoder-decoder model.
label_pad_token_id: The label pad token id.
padding_value: The padding value to use for the concatenated inputs_ids.
device: The device for the concatenated inputs.
Returns:
A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
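For a batch of size B, each chosen tensor is padded to the longer of the chosen/rejected
lengths and stacked on top of its rejected counterpart along the batch dimension, so e.g.
`concatenated_input_ids` has shape (2 * B, max_length), with rows [0, B) coming from "chosen"
and rows [B, 2 * B) from "rejected".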
"""
concatenated_batch = {}
if is_encoder_decoder:
max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
else:
max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])
for k in batch:
if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
if "labels" in k or is_encoder_decoder:
pad_value = label_pad_token_id
elif k.endswith("_input_ids"):
pad_value = padding_value
elif k.endswith("_attention_mask"):
pad_value = 0
concatenated_key = k.replace("chosen", "concatenated")
concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
for k in batch:
if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
if "labels" in k or is_encoder_decoder:
pad_value = label_pad_token_id
elif k.endswith("_input_ids"):
pad_value = padding_value
elif k.endswith("_attention_mask"):
pad_value = 0
concatenated_key = k.replace("rejected", "concatenated")
concatenated_batch[concatenated_key] = torch.cat(
(
concatenated_batch[concatenated_key],
pad_to_length(batch[k], max_length, pad_value=pad_value),
),
dim=0,
).to(device=device)
if is_encoder_decoder:
concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
concatenated_batch["concatenated_attention_mask"] = (
batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
)
return concatenated_batch
def dpo_loss(
self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor,
reference_free: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the DPO loss for a batch of policy and reference model log probabilities.
Args:
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.
Returns:
A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
The losses tensor contains the DPO loss for each example in the batch.
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
"""
pi_logratios = policy_chosen_logps - policy_rejected_logps
if reference_free:
ref_logratios = 0
else:
ref_logratios = reference_chosen_logps - reference_rejected_logps
pi_logratios = pi_logratios.to(self.accelerator.device)
ref_logratios = ref_logratios.to(self.accelerator.device)
logits = pi_logratios - ref_logratios
# The beta is a temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5.
# We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about the labels and
# calculates a conservative DPO loss.
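# For the default "sigmoid" loss with label_smoothing == 0, the branch below computes
#   losses = -logsigmoid(beta * [(log pi(y_w|x) - log pi(y_l|x)) - (log pi_ref(y_w|x) - log pi_ref(y_l|x))])
# where y_w / y_l are the chosen / rejected responses and pi_ref is the reference model.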
if self.loss_type == "sigmoid":
losses = (
-F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
- F.logsigmoid(-self.beta * logits) * self.label_smoothing
)
elif self.loss_type == "hinge":
losses = torch.relu(1 - self.beta * logits)
elif self.loss_type == "ipo":
# eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper.
losses = (logits - 1 / (2 * self.beta)) ** 2
elif self.loss_type == "kto_pair":
# eqn (7) of the HALOs paper
chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0)
rejected_KL = (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0)
chosen_logratios = policy_chosen_logps - reference_chosen_logps
rejected_logratios = policy_rejected_logps - reference_rejected_logps
# As described in the KTO report, the KL term for chosen (rejected) is estimated using the rejected (chosen) half.
losses = torch.cat(
(
1 - F.sigmoid(self.beta * (chosen_logratios - rejected_KL)),
1 - F.sigmoid(self.beta * (chosen_KL - rejected_logratios)),
),
0,
)
else:
raise ValueError(
f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'kto_pair']"
)
chosen_rewards = (
self.beta
* (
policy_chosen_logps.to(self.accelerator.device) - reference_chosen_logps.to(self.accelerator.device)
).detach()
)
rejected_rewards = (
self.beta
* (
policy_rejected_logps.to(self.accelerator.device)
- reference_rejected_logps.to(self.accelerator.device)
).detach()
)
return losses, chosen_rewards, rejected_rewards
@staticmethod
def get_batch_logps(
logits: torch.FloatTensor,
labels: torch.LongTensor,
average_log_prob: bool = False,
label_pad_token_id: int = -100,
is_encoder_decoder: bool = False,
) -> torch.FloatTensor:
"""Compute the log probabilities of the given labels under the given logits.
Args:
logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
Returns:
A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
"""
if logits.shape[:-1] != labels.shape:
raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
if not is_encoder_decoder:
labels = labels[:, 1:].clone()
logits = logits[:, :-1, :]
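# After this shift, position t of `logits` scores `labels[:, t]` (the next token of the original
# sequence); padded label positions are masked out below before the per-token log probabilities
# are summed (or averaged) into a (batch_size,) tensor.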
loss_mask = labels != label_pad_token_id
# dummy token; we'll ignore the losses on these tokens later
labels[labels == label_pad_token_id] = 0
per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
if average_log_prob:
return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
else:
return (per_token_logps * loss_mask).sum(-1)
def concatenated_forward(
self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.
We do this to avoid doing two forward passes, because it's faster for FSDP.
"""
concatenated_batch = self.concatenated_inputs(
batch,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
padding_value=self.padding_value,
device=self.accelerator.device,
)
len_chosen = batch["chosen_labels"].shape[0]
model_kwargs = (
{
"labels": concatenated_batch["concatenated_labels"],
"decoder_input_ids": concatenated_batch.pop("concatenated_decoder_input_ids", None),
}
if self.is_encoder_decoder
else {}
)
all_logits = model(
concatenated_batch["concatenated_input_ids"],
attention_mask=concatenated_batch["concatenated_attention_mask"],
**model_kwargs,
).logits
all_logps = self.get_batch_logps(
all_logits,
concatenated_batch["concatenated_labels"],
average_log_prob=False,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
)
chosen_logps = all_logps[:len_chosen]
rejected_logps = all_logps[len_chosen:]
chosen_logits = all_logits[:len_chosen]
rejected_logits = all_logits[len_chosen:]
return (chosen_logps, rejected_logps, chosen_logits, rejected_logits)
def get_batch_loss_metrics(
self,
model,
batch: Dict[str, Union[List, torch.LongTensor]],
train_eval: Literal["train", "eval"] = "train",
):
"""Compute the DPO loss and other metrics for the given batch of inputs for train or test."""
metrics = {}
(
policy_chosen_logps,
policy_rejected_logps,
policy_chosen_logits,
policy_rejected_logits,
) = self.concatenated_forward(model, batch)
# if reference_chosen_logps and reference_rejected_logps in batch use them, otherwise use the reference model
if "reference_chosen_logps" in batch and "reference_rejected_logps" in batch:
reference_chosen_logps = batch["reference_chosen_logps"]
reference_rejected_logps = batch["reference_rejected_logps"]
else:
with torch.no_grad():
if self.ref_model is None:
with self.null_ref_context():
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
) = self.concatenated_forward(self.model, batch)
else:
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
) = self.concatenated_forward(self.ref_model, batch)
losses, chosen_rewards, rejected_rewards = self.dpo_loss(
policy_chosen_logps,
policy_rejected_logps,
reference_chosen_logps,
reference_rejected_logps,
)
reward_accuracies = (chosen_rewards > rejected_rewards).float()
prefix = "eval_" if train_eval == "eval" else ""
metrics[f"{prefix}rewards/chosen"] = chosen_rewards.mean().cpu()
metrics[f"{prefix}rewards/rejected"] = rejected_rewards.mean().cpu()
metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.mean().cpu()
metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).mean().cpu()
metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().mean().cpu()
metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().mean().cpu()
metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().mean().cpu()
metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().mean().cpu()
return losses.mean(), metrics
def compute_loss(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: Dict[str, Union[torch.Tensor, Any]],
return_outputs=False,
) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
if not self.use_dpo_data_collator:
warnings.warn(
"compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
"DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
)
compute_loss_context_manager = torch.cuda.amp.autocast if self._peft_has_been_casted_to_bf16 else nullcontext
with compute_loss_context_manager():
loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")
# force log the metrics
self.store_metrics(metrics, train_eval="train")
if return_outputs:
return (loss, metrics)
return loss
def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]:
"""Generate samples from the model and reference model for the given batch of inputs."""
# If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
# the torch cuda amp context manager, as some hidden states are silently cast to full precision.
generate_context_manager = nullcontext if not self._peft_has_been_casted_to_bf16 else torch.cuda.amp.autocast
with generate_context_manager():
policy_output = model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.tokenizer.pad_token_id,
)
# if reference_output in batch use that otherwise use the reference model
if "reference_output" in batch:
reference_output = batch["reference_output"]
else:
if self.ref_model is None:
with self.null_ref_context():
reference_output = self.model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.tokenizer.pad_token_id,
)
else:
reference_output = self.ref_model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.tokenizer.pad_token_id,
)
policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id)
policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True)
reference_output = pad_to_length(reference_output, self.max_length, self.tokenizer.pad_token_id)
reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True)
return policy_output_decoded, reference_output_decoded
def prediction_step(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
):
if not self.use_dpo_data_collator:
warnings.warn(
"prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
"DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
)
if ignore_keys is None:
if hasattr(model, "config"):
ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
prediction_context_manager = torch.cuda.amp.autocast if self._peft_has_been_casted_to_bf16 else nullcontext
with torch.no_grad(), prediction_context_manager():
loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")
# force log the metrics
self.store_metrics(metrics, train_eval="eval")
if prediction_loss_only:
return (loss.detach(), None, None)
# logits for the chosen and rejected samples from model
logits_dict = {
"eval_logits/chosen": metrics["eval_logits/chosen"],
"eval_logits/rejected": metrics["eval_logits/rejected"],
}
logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys)
logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device)
labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
return (loss.detach(), logits, labels)
def store_metrics(self, metrics: Dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
for key, value in metrics.items():
self._stored_metrics[train_eval][key].append(value)
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Overriding built-in evaluation loop to store metrics for each batch.
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
# Sample and save to game log if requested (for one batch to save time)
if self.generate_during_eval:
# Generate random indices within the range of the total number of samples
num_samples = len(dataloader.dataset)
random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
# Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
random_batch_dataset = dataloader.dataset.select(random_indices)
random_batch = self.data_collator(random_batch_dataset)
random_batch = self._prepare_inputs(random_batch)
policy_output_decoded, ref_output_decoded = self.get_batch_samples(self.model, random_batch)
self.log(
{
"game_log": wandb.Table(
columns=["Prompt", "Policy", "Ref Model"],
rows=[
[prompt, pol[len(prompt) :], ref[len(prompt) :]]
for prompt, pol, ref in zip(
random_batch["prompt"], policy_output_decoded, ref_output_decoded
)
],
)
}
)
self.state.log_history.pop()
# Base evaluation
initial_output = super().evaluation_loop(
dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
)
return initial_output
def log(self, logs: Dict[str, float]) -> None:
"""
Log `logs` on the various objects watching training, including stored metrics.
Args:
logs (`Dict[str, float]`):
The values to log.
"""
# logs either has 'loss' or 'eval_loss'
train_eval = "train" if "loss" in logs else "eval"
# Add averaged stored metrics to logs
for key, metrics in self._stored_metrics[train_eval].items():
logs[key] = torch.tensor(metrics).mean().item()
del self._stored_metrics[train_eval]
return super().log(logs)
@wraps(Trainer.push_to_hub)
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
"""
Overwrite the `push_to_hub` method in order to force-add the tag "dpo" when pushing the
model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
"""
kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs)
return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs)
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/reward_trainer.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import warnings
from dataclasses import FrozenInstanceError, replace
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from datasets import Dataset
from transformers import DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_pt_utils import nested_detach
from transformers.trainer_utils import EvalPrediction
from ..import_utils import is_peft_available
from .reward_config import RewardConfig
from .utils import RewardDataCollatorWithPadding, compute_accuracy
if is_peft_available():
from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training
class RewardTrainer(Trainer):
r"""
The RewardTrainer can be used to train your custom Reward Model. It is a subclass of the
`transformers.Trainer` class and inherits all of its attributes and methods. It is recommended to use
an `AutoModelForSequenceClassification` as the reward model. The reward model should be trained on a dataset
of paired examples, where each example is a tuple of two sequences. The reward model should be trained to
predict which example in the pair is more relevant to the task at hand.
The reward trainer expects a very specific format for the dataset. If you use the default
`RewardDataCollatorWithPadding` data collator, the dataset should contain at least these four entries:
- `input_ids_chosen`
- `attention_mask_chosen`
- `input_ids_rejected`
- `attention_mask_rejected`
Optionally, you can also pass a `margin` entry to the dataset. This entry should contain the margin used to modulate the
loss of the reward model as outlined in https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/.
If you don't pass a margin, no margin will be used.
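Example (illustrative sketch; the model name, dataset and preprocessing below are placeholders,
not part of this file):
```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardConfig, RewardTrainer

model = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=1)
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
# `train_dataset` must already be tokenized and provide input_ids_chosen /
# attention_mask_chosen / input_ids_rejected / attention_mask_rejected columns.
trainer = RewardTrainer(
    model=model,
    args=RewardConfig(output_dir="reward-model", max_length=512),
    tokenizer=tokenizer,
    train_dataset=train_dataset,
)
trainer.train()
```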
"""
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: Optional[RewardConfig] = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (
None,
None,
),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
max_length: Optional[int] = None,
peft_config: Optional[Dict] = None,
):
"""
Initialize RewardTrainer.
Args:
model (`transformers.PreTrainedModel`):
The model to train, preferably an `AutoModelForSequenceClassification`.
args (`RewardConfig`):
The arguments to use for training.
data_collator (`transformers.DataCollator`):
The data collator to use for training. If None is specified, the default data collator (`RewardDataCollatorWithPadding`) will be used
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
train_dataset (`datasets.Dataset`):
The dataset to use for training.
eval_dataset (`datasets.Dataset`):
The dataset to use for evaluation.
tokenizer (`transformers.PreTrainedTokenizerBase`):
The tokenizer to use for training. This argument is required if you want to use the default data collator.
model_init (`Callable[[], transformers.PreTrainedModel]`):
The model initializer to use for training. If None is specified, the default model initializer will be used.
compute_metrics (`Callable[[transformers.EvalPrediction], Dict]`, *optional* defaults to `compute_accuracy`):
The metrics to use for evaluation. If no metrics are specified, the default metric (`compute_accuracy`) will be used.
callbacks (`List[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
peft_config (`Dict`, defaults to `None`):
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
"""
if type(args) == TrainingArguments:
warnings.warn(
"Using `transformers.TrainingArguments` for `args` is deprecated and will be removed in a future version. Please use `RewardConfig` instead.",
FutureWarning,
)
if max_length is not None:
warnings.warn(
"The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.",
FutureWarning,
)
else:
if max_length is not None and args.max_length is not None:
raise ValueError(
"You cannot specify both `max_length` and `args.max_length`. Please use the `RewardConfig` to set `max_length` once."
)
if max_length is not None and args.max_length is None:
warnings.warn(
"The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.",
FutureWarning,
)
if not is_peft_available() and peft_config is not None:
raise ValueError(
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
)
elif is_peft_available() and peft_config is not None:
if not isinstance(model, PeftModel):
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_quantized", False):
_supports_gc_kwargs = "gradient_checkpointing_kwargs" in list(
inspect.signature(prepare_model_for_kbit_training).parameters
)
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
if not _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None:
warnings.warn(
"You passed `gradient_checkpointing_kwargs` in the trainer's kwargs, but your peft version does not support it. "
"Please update to the latest version of peft to use `gradient_checkpointing_kwargs`."
)
elif _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None:
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
model = get_peft_model(model, peft_config)
if compute_metrics is None:
compute_metrics = compute_accuracy
if data_collator is None:
if tokenizer is None:
raise ValueError(
"A tokenizer must be specified when using the default RewardDataCollatorWithPadding"
)
if type(args) == TrainingArguments:
if max_length is None:
warnings.warn(
"When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig."
" It will be set to `512` by default, but you should do it yourself in the future.",
UserWarning,
)
max_length = 512
else:
if max_length is None and args.max_length is None:
warnings.warn(
"When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig."
" It will be set to `512` by default, but you should do it yourself in the future.",
UserWarning,
)
max_length = 512
if max_length is None and args.max_length is not None:
max_length = args.max_length
data_collator = RewardDataCollatorWithPadding(tokenizer, max_length=max_length)
if args.remove_unused_columns:
try: # for bc before https://github.com/huggingface/transformers/pull/25435
args.remove_unused_columns = False
except FrozenInstanceError:
args = replace(args, remove_unused_columns=False)
# warn users
warnings.warn(
"When using RewardDataCollatorWithPadding, you should set `remove_unused_columns=False` in your RewardConfig;"
" we have set it for you, but you should do it yourself in the future.",
UserWarning,
)
self.use_reward_data_collator = True
else:
self.use_reward_data_collator = False
super().__init__(
model,
args,
data_collator,
train_dataset,
eval_dataset,
tokenizer,
model_init,
compute_metrics,
callbacks,
optimizers,
preprocess_logits_for_metrics,
)
def compute_loss(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: Dict[str, Union[torch.Tensor, Any]],
return_outputs=False,
) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
if not self.use_reward_data_collator:
warnings.warn(
"The current compute_loss is implemented for RewardDataCollatorWithPadding,"
" if you are using a custom data collator make sure you know what you are doing or"
" implement your own compute_loss method."
)
rewards_chosen = model(
input_ids=inputs["input_ids_chosen"],
attention_mask=inputs["attention_mask_chosen"],
return_dict=True,
)["logits"]
rewards_rejected = model(
input_ids=inputs["input_ids_rejected"],
attention_mask=inputs["attention_mask_rejected"],
return_dict=True,
)["logits"]
# calculate loss, optionally modulate with margin
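# i.e. loss = -logsigmoid(r_chosen - r_rejected - margin) (or without the margin term), a
# Bradley-Terry style pairwise loss that pushes the chosen reward above the rejected one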
if "margin" in inputs:
loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs["margin"]).mean()
else:
loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean()
if return_outputs:
return loss, {
"rewards_chosen": rewards_chosen,
"rewards_rejected": rewards_rejected,
}
return loss
def prediction_step(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
loss, logits_dict = self.compute_loss(model, inputs, return_outputs=True)
if prediction_loss_only:
return (loss, None, None)
loss = loss.detach()
logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys)
logits = nested_detach(logits)
# Stack accepted against rejected, mean over logits
# and softmax to get preferences between accepted and rejected to sum to 1
logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T
labels = torch.zeros(logits.shape[0])
labels = self._prepare_inputs(labels)
return loss, logits, labels
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/__init__.py | # flake8: noqa
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There is a circular import in the PPOTrainer if we let isort sort these
# isort: off
from .utils import (
AdaptiveKLController,
FixedKLController,
ConstantLengthDataset,
DataCollatorForCompletionOnlyLM,
RunningMoments,
disable_dropout_in_model,
peft_module_casting_to_bf16,
)
# isort: on
from ..import_utils import is_diffusers_available
from .base import BaseTrainer
from .ddpo_config import DDPOConfig
if is_diffusers_available():
from .ddpo_trainer import DDPOTrainer
from .dpo_trainer import DPOTrainer
from .iterative_sft_trainer import IterativeSFTTrainer
from .ppo_config import PPOConfig
from .ppo_trainer import PPOTrainer
from .reward_config import RewardConfig
from .reward_trainer import RewardTrainer, compute_accuracy
from .sft_trainer import SFTTrainer
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/utils.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from collections import deque
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import IterableDataset
from transformers import DataCollatorForLanguageModeling, PreTrainedTokenizerBase
from ..import_utils import is_unsloth_available
class AdaptiveKLController:
"""
Adaptive KL controller described in the paper:
https://arxiv.org/pdf/1909.08593.pdf
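The KL coefficient is updated multiplicatively (see `update` below):
    mult = 1 + clip(current_kl / target - 1, -0.2, 0.2) * n_steps / horizon
so the coefficient grows when the observed KL exceeds the target and shrinks when it falls below it.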
"""
def __init__(self, init_kl_coef, target, horizon):
self.value = init_kl_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps):
target = self.target
proportional_error = np.clip(current / target - 1, -0.2, 0.2)
mult = 1 + proportional_error * n_steps / self.horizon
self.value *= mult
class FixedKLController:
"""Fixed KL controller."""
def __init__(self, kl_coef):
self.value = kl_coef
def update(self, current, n_steps):
pass
class DataCollatorForCompletionOnlyLM(DataCollatorForLanguageModeling):
"""
Data collator used for completion tasks. It ensures that all the tokens of the labels are set to an 'ignore_index'
when they do not come from the assistant. This ensures that the loss is only
calculated on the completion made by the assistant.
Args:
response_template (`Union[str, List[int]]`): the template form that indicates the start of the response, typically something like
'### Response:\n'. It can also be passed as tokenized ids, which can be useful when using a tokenizer that encodes the response
differently if it does not have proper context.
instruction_template (`Union[str, List[int]]`): the template form that indicates the start of the human instruction, typically something like
'### Human:\n'. Useful for assistant-style conversation datasets. It can also be passed as tokenized ids.
mlm (`bool`, *optional*, defaults to `False`): Whether or not to use masked language modeling in the underlying
`DataCollatorForLanguageModeling` class. Note that this option currently has no effect but is present
for flexibility and backwards-compatibility.
ignore_index (`int`, *optional*, defaults to `-100`):
The index to use to ignore the initial tokens with
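Example (illustrative sketch; the response template depends on your own prompt format, and
`tokenizer` is assumed to be a tokenizer you have already loaded):
```python
collator = DataCollatorForCompletionOnlyLM(
    response_template="### Response:\n",
    tokenizer=tokenizer,
)
# When the collator builds a batch, every label position up to and including the response
# template is set to `ignore_index`, so only the completion contributes to the loss.
```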
"""
def __init__(
self,
response_template: Union[str, List[int]],
instruction_template: Union[str, List[int]] = None,
*args,
mlm: bool = False,
ignore_index: int = -100,
**kwargs,
):
super().__init__(*args, mlm=mlm, **kwargs)
self.instruction_template = instruction_template
if isinstance(instruction_template, str):
# The user provides a string, must tokenize
self.instruction_token_ids = self.tokenizer.encode(self.instruction_template, add_special_tokens=False)
else:
# The user already provides the token ids
self.instruction_token_ids = instruction_template
self.response_template = response_template
if isinstance(response_template, str):
# The user provides a string, must tokenize
self.response_token_ids = self.tokenizer.encode(self.response_template, add_special_tokens=False)
else:
# The user already provides the token ids
self.response_token_ids = response_template
if not self.mlm and self.instruction_template and self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:
warnings.warn(
"The pad_token_id and eos_token_id values of this tokenizer are identical. "
"If you are planning for multi-turn training, "
"it can result in the model continuously generating questions and answers without eos token. "
"To avoid this, set the pad_token_id to a different value."
)
self.ignore_index = ignore_index
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
batch = super().torch_call(examples)
if self.instruction_template is None:
for i in range(len(examples)):
response_token_ids_start_idx = None
for idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]:
# `response_token_ids` is `'### Response:\n'`, here we are just making sure that the token IDs match
if (
self.response_token_ids
== batch["labels"][i][idx : idx + len(self.response_token_ids)].tolist()
):
response_token_ids_start_idx = idx
if response_token_ids_start_idx is None:
warnings.warn(
f"Could not find response key `{self.response_template}` in the "
f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} '
f"This instance will be ignored in loss calculation. "
f"Note, if this happens often, consider increasing the `max_seq_length`."
)
batch["labels"][i, :] = self.ignore_index
else:
response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids)
# Make pytorch loss function ignore all tokens up through the end of the response key
batch["labels"][i, :response_token_ids_end_idx] = self.ignore_index
else:
for i in range(len(examples)):
response_token_ids_idxs = []
human_token_ids_idxs = []
for assistant_idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]:
# find the indexes of the start of a response.
if (
self.response_token_ids
== batch["labels"][i][assistant_idx : assistant_idx + len(self.response_token_ids)].tolist()
):
response_token_ids_idxs.append(assistant_idx + len(self.response_token_ids))
if len(response_token_ids_idxs) == 0:
warnings.warn(
f"Could not find response key `{self.response_template}` in the "
f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} '
f"This instance will be ignored in loss calculation. "
f"Note, if this happens often, consider increasing the `max_seq_length`."
)
batch["labels"][i, :] = self.ignore_index
human_token_ids = self.instruction_token_ids
for human_idx in np.where(batch["labels"][i] == human_token_ids[0])[0]:
# find the indexes of the start of a human answer.
if human_token_ids == batch["labels"][i][human_idx : human_idx + len(human_token_ids)].tolist():
human_token_ids_idxs.append(human_idx)
if len(human_token_ids_idxs) == 0:
warnings.warn(
f"Could not find instruction key `{self.instruction_template}` in the "
f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} '
f"This instance will be ignored in loss calculation. "
f"Note, if this happens often, consider increasing the `max_seq_length`."
)
batch["labels"][i, :] = self.ignore_index
if (
len(human_token_ids_idxs) > 0
and len(response_token_ids_idxs) > 0
and human_token_ids_idxs[0] > response_token_ids_idxs[0]
):
human_token_ids_idxs = [0] + human_token_ids_idxs
for idx, (start, end) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)):
# Make pytorch loss function ignore all non response tokens
if idx != 0:
batch["labels"][i, start:end] = self.ignore_index
else:
batch["labels"][i, :end] = self.ignore_index
if len(response_token_ids_idxs) < len(human_token_ids_idxs):
batch["labels"][i, human_token_ids_idxs[-1] :] = self.ignore_index
return batch
@dataclass
class RewardDataCollatorWithPadding:
r"""
Reward DataCollator class that pads the inputs to the maximum length of the batch.
Args:
tokenizer (`PreTrainedTokenizerBase`):
The tokenizer used for encoding the data.
padding (`Union[bool, str, `PaddingStrategy`]`, `optional`, defaults to `True`):
padding_strategy to pass to the tokenizer.
max_length (`Optional[int]`, `optional`, defaults to `None`):
The maximum length of the sequence to be processed.
pad_to_multiple_of (`Optional[int]`, `optional`, defaults to `None`):
If set will pad the sequence to a multiple of the provided value.
return_tensors (`str`, `optional`, defaults to `"pt"`):
The tensor type to use.
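Each incoming feature is expected to provide `input_ids_chosen`, `attention_mask_chosen`,
`input_ids_rejected`, `attention_mask_rejected` and, optionally, a scalar `margin`. The chosen
and rejected sequences are padded separately and returned in a single batch dict together with
`return_loss=True`.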
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
features_chosen = []
features_rejected = []
margin = []
# check if we have a margin. If we do, we need to batch it as well
has_margin = "margin" in features[0]
for feature in features:
# check if the keys are named as expected
if (
"input_ids_chosen" not in feature
or "input_ids_rejected" not in feature
or "attention_mask_chosen" not in feature
or "attention_mask_rejected" not in feature
):
raise ValueError(
"The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`"
)
features_chosen.append(
{
"input_ids": feature["input_ids_chosen"],
"attention_mask": feature["attention_mask_chosen"],
}
)
features_rejected.append(
{
"input_ids": feature["input_ids_rejected"],
"attention_mask": feature["attention_mask_rejected"],
}
)
if has_margin:
margin.append(feature["margin"])
batch_chosen = self.tokenizer.pad(
features_chosen,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
batch_rejected = self.tokenizer.pad(
features_rejected,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
batch = {
"input_ids_chosen": batch_chosen["input_ids"],
"attention_mask_chosen": batch_chosen["attention_mask"],
"input_ids_rejected": batch_rejected["input_ids"],
"attention_mask_rejected": batch_rejected["attention_mask"],
"return_loss": True,
}
if has_margin:
margin = torch.tensor(margin, dtype=torch.float)
batch["margin"] = margin
return batch
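# Example (illustrative sketch): pairing chosen/rejected sequences with the collator above.
# The helper name and the "gpt2" checkpoint are placeholders.
def _example_reward_collator():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token
    collator = RewardDataCollatorWithPadding(tokenizer=tokenizer)
    chosen = tokenizer("The chosen, better completion")
    rejected = tokenizer("The rejected one")
    batch = collator(
        [
            {
                "input_ids_chosen": chosen["input_ids"],
                "attention_mask_chosen": chosen["attention_mask"],
                "input_ids_rejected": rejected["input_ids"],
                "attention_mask_rejected": rejected["attention_mask"],
            }
        ]
    )
    # chosen and rejected tensors are padded separately; `return_loss` is set for the reward model
    return batch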
@dataclass
class DPODataCollatorWithPadding:
r"""
DPO DataCollator class that pads the tokenized inputs to the maximum length of the batch.
Args:
pad_token_id (`int` defaults to 0):
The tokenizer's pad_token_id.
label_pad_token_id (`int`, defaults to -100):
The label used for masking.
        is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `False`):
            Whether or not your model has an encoder-decoder architecture.
"""
pad_token_id: int = 0
label_pad_token_id: int = -100
is_encoder_decoder: Optional[bool] = False
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
# first, pad everything to the same length
padded_batch = {}
for k in features[0].keys():
if k.endswith("_input_ids") or k.endswith("_attention_mask") or k.endswith("_labels"):
if self.is_encoder_decoder:
to_pad = [torch.LongTensor(ex[k]) for ex in features]
if (k.startswith("prompt")) and (k.endswith("input_ids")):
if self.pad_token_id is None:
raise ValueError(
"Padding is enabled, but the tokenizer is not configured with a padding token."
" Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)"
" before calling the trainer."
)
padding_value = self.pad_token_id
elif k.endswith("_attention_mask"):
padding_value = 0
elif (k.startswith("chosen")) or (k.startswith("rejected")) or ("decoder" in k):
padding_value = self.label_pad_token_id
else:
raise ValueError(f"Unexpected key in batch '{k}'")
padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value)
else:
# adapted from https://stackoverflow.com/questions/73256206
if "prompt" in k:
to_pad = [torch.LongTensor(ex[k][::-1]) for ex in features]
else:
to_pad = [torch.LongTensor(ex[k]) for ex in features]
if k.endswith("_input_ids"):
if self.pad_token_id is None:
raise ValueError(
"Padding is enabled, but the tokenizer is not configured with a padding token."
" Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)"
" before calling the trainer."
)
padding_value = self.pad_token_id
elif k.endswith("_labels"):
padding_value = self.label_pad_token_id
elif k.endswith("_attention_mask"):
padding_value = 0
else:
raise ValueError(f"Unexpected key in batch '{k}'")
padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value)
# for the prompt, flip back so padding is on left side
if "prompt" in k:
padded_batch[k] = padded_batch[k].flip(dims=[1])
elif k.endswith("_logps"):
# the cached reference model logprobs
padded_batch[k] = torch.tensor([ex[k] for ex in features])
else:
padded_batch[k] = [ex[k] for ex in features]
return padded_batch
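# Example (illustrative sketch): collating pre-tokenized DPO features. The token ids below are
# arbitrary; in practice they come from the DPO trainer's tokenization step.
def _example_dpo_collator():
    collator = DPODataCollatorWithPadding(pad_token_id=0, label_pad_token_id=-100)
    features = [
        {
            "prompt_input_ids": [5, 6, 7],
            "prompt_attention_mask": [1, 1, 1],
            "chosen_input_ids": [5, 6, 7, 8, 9],
            "chosen_attention_mask": [1, 1, 1, 1, 1],
            "chosen_labels": [-100, -100, -100, 8, 9],
            "rejected_input_ids": [5, 6, 7, 10],
            "rejected_attention_mask": [1, 1, 1, 1],
            "rejected_labels": [-100, -100, -100, 10],
        },
        {
            "prompt_input_ids": [5, 6],
            "prompt_attention_mask": [1, 1],
            "chosen_input_ids": [5, 6, 11],
            "chosen_attention_mask": [1, 1, 1],
            "chosen_labels": [-100, -100, 11],
            "rejected_input_ids": [5, 6, 12, 13],
            "rejected_attention_mask": [1, 1, 1, 1],
            "rejected_labels": [-100, -100, 12, 13],
        },
    ]
    batch = collator(features)
    # prompt_* tensors end up left-padded (the flip above restores left padding),
    # while chosen_*/rejected_* tensors are right-padded
    return batch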
class ConstantLengthDataset(IterableDataset):
"""
Iterable dataset that returns constant length chunks of tokens from stream of text files.
The dataset also formats the text before tokenization with a specific format that is provided
by the user.
Args:
tokenizer (`transformers.PreTrainedTokenizer`):
The processor used for processing the data.
dataset (`dataset.Dataset`):
Dataset with text files.
        dataset_text_field (`str`, *optional*):
            Name of the field in the dataset that contains the text. Used only if `formatting_func` is `None`.
        formatting_func (`Callable`, *optional*):
            Function that formats the text before tokenization. It is usually recommended that it follows a certain
            pattern such as `"### Question: {question}\n ### Answer: {answer}\n"`
        infinite (`bool`, *optional*, defaults to `False`):
            If `True`, the iterator restarts from the beginning once the dataset is exhausted; otherwise iteration stops.
seq_length (`int`, *optional*, defaults to `1024`):
Length of token sequences to return.
num_of_sequences (`int`, *optional*, defaults to `1024`):
Number of token sequences to keep in buffer.
        chars_per_token (`float`, *optional*, defaults to `3.6`):
            Number of characters per token, used to estimate the number of tokens in the text buffer.
eos_token_id (`int`, *optional*, defaults to `0`):
Id of the end of sequence token if the passed tokenizer does not have an EOS token.
        shuffle (`bool`, *optional*, defaults to `True`):
            Shuffle the examples before they are returned.
        append_concat_token (`bool`, *optional*, defaults to `True`):
            If `True`, appends `eos_token_id` at the end of each sample being packed.
        add_special_tokens (`bool`, *optional*, defaults to `True`):
            If `True`, the tokenizer adds special tokens to each sample being packed.
"""
def __init__(
self,
tokenizer,
dataset,
dataset_text_field=None,
formatting_func=None,
infinite=False,
seq_length=1024,
num_of_sequences=1024,
chars_per_token=3.6,
eos_token_id=0,
shuffle=True,
append_concat_token=True,
add_special_tokens=True,
):
self.tokenizer = tokenizer
if tokenizer.eos_token_id is None:
warnings.warn(
"The passed tokenizer does not have an EOS token. We will use the passed eos_token_id instead which corresponds"
f" to {eos_token_id}. If this is not the correct EOS token, make sure to pass the correct eos_token_id."
)
self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id else eos_token_id
self.dataset = dataset
self.seq_length = seq_length
self.infinite = infinite
self.current_size = 0
self.max_buffer_size = seq_length * chars_per_token * num_of_sequences
self.shuffle = shuffle
self.append_concat_token = append_concat_token
self.add_special_tokens = add_special_tokens
if formatting_func is None:
self.formatting_func = lambda x: x[dataset_text_field]
else:
self.formatting_func = formatting_func
if formatting_func is not None:
if formatting_func.__code__.co_argcount > 1:
warnings.warn(
"The passed formatting_func has more than one argument. Usually that function should have a single argument `example`"
" which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing."
)
def __len__(self):
return len(self.dataset)
def __iter__(self):
iterator = iter(self.dataset)
more_examples = True
while more_examples:
buffer, buffer_len = [], 0
while True:
if buffer_len >= self.max_buffer_size:
break
try:
buffer.append(self.formatting_func(next(iterator)))
buffer_len += len(buffer[-1])
except StopIteration:
if self.infinite:
iterator = iter(self.dataset)
warnings.warn("The dataset reached end and the iterator is reset to the start.")
else:
more_examples = False
break
tokenized_inputs = self.tokenizer(buffer, add_special_tokens=self.add_special_tokens, truncation=False)[
"input_ids"
]
all_token_ids = []
for tokenized_input in tokenized_inputs:
if self.append_concat_token:
tokenized_input = tokenized_input + [self.concat_token_id]
all_token_ids.extend(tokenized_input)
examples = []
for i in range(0, len(all_token_ids), self.seq_length):
input_ids = all_token_ids[i : i + self.seq_length]
if len(input_ids) == self.seq_length:
examples.append(input_ids)
if self.shuffle:
random.shuffle(examples)
for example in examples:
self.current_size += 1
yield {
"input_ids": torch.LongTensor(example),
"labels": torch.LongTensor(example),
}
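# Example (illustrative sketch): packing a small text dataset into fixed-length chunks.
# The "gpt2" checkpoint and the toy dataset are placeholders.
def _example_constant_length_dataset():
    from datasets import Dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    raw = Dataset.from_dict({"text": ["TRL packs short samples into constant length chunks."] * 64})
    packed = ConstantLengthDataset(tokenizer, raw, dataset_text_field="text", seq_length=32, infinite=False)
    example = next(iter(packed))
    # each yielded example is a chunk of exactly `seq_length` tokens with labels == input_ids
    return example["input_ids"].shape  # torch.Size([32])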
class RunningMoments:
def __init__(self, accelerator):
"""
Calculates the running mean and standard deviation of a data stream. Reference:
https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L75
"""
self.mean = 0
self.std = 1
self.var = 1
self.count = 1e-24
self.accelerator = accelerator
@torch.no_grad()
def update(self, xs: torch.Tensor) -> Tuple[float, float]:
"""
Updates running moments from batch's moments computed across ranks
"""
if self.accelerator.use_distributed:
xs_mean, xs_var, xs_count = get_global_statistics(self.accelerator, xs)
else:
xs_count = xs.numel()
xs_var, xs_mean = torch.var_mean(xs, unbiased=False)
xs_mean, xs_var = xs_mean.float(), xs_var.float()
delta = xs_mean - self.mean
tot_count = self.count + xs_count
new_sum = xs_var * xs_count
# correct old_sum deviation accounting for the new mean
old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count
tot_sum = old_sum + new_sum
self.mean += delta * xs_count / tot_count
self.var = tot_sum / tot_count
self.std = (self.var * tot_count / (tot_count - 1)).float().sqrt()
self.count = tot_count
return xs_mean.item(), (xs_var * xs_count / (xs_count - 1)).float().sqrt().item()
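# Example (illustrative sketch): tracking running reward statistics across batches on a single process.
def _example_running_moments():
    from accelerate import Accelerator

    running = RunningMoments(Accelerator())
    batch_mean, batch_std = running.update(torch.randn(512))
    # `running.mean` and `running.std` hold the statistics accumulated over every update() call
    return batch_mean, batch_std, running.mean, running.std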
@torch.no_grad()
def get_global_statistics(accelerator, xs: torch.Tensor, mask=None, device="cpu") -> Tuple[float, float, int]:
"""
Computes element-wise mean and variance of the tensor across processes. Reference:
https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75
"""
xs = xs.to(accelerator.device)
sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device)
sum_and_count = accelerator.reduce(sum_and_count)
global_sum, count = sum_and_count
global_mean = global_sum / count
sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask))
sum_var = accelerator.reduce(sum_var)
global_var = sum_var / count
return global_mean.to(device), global_var.to(device), count.to(device)
def compute_accuracy(eval_pred) -> Dict[str, float]:
predictions, labels = eval_pred
# Here, predictions is rewards_chosen and rewards_rejected.
# We want to see how much of the time rewards_chosen > rewards_rejected.
if np.array(predictions[:, 0] == predictions[:, 1], dtype=float).sum() > 0:
warnings.warn(
f"There are {np.array(predictions[:, 0] == predictions[:, 1]).sum()} out of {len(predictions[:, 0])} instances where the predictions for both options are equal. As a consequence the accuracy can be misleading."
)
predictions = np.argmax(predictions, axis=1)
accuracy = np.array(predictions == labels, dtype=float).mean().item()
return {"accuracy": accuracy}
def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int = -1) -> torch.Tensor:
if tensor.size(dim) >= length:
return tensor
else:
pad_size = list(tensor.shape)
pad_size[dim] = length - tensor.size(dim)
return torch.cat(
[
tensor,
pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device),
],
dim=dim,
)
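# Example (illustrative sketch): right-padding a tensor along its last dimension.
def _example_pad_to_length():
    padded = pad_to_length(torch.tensor([[1, 2, 3]]), length=5, pad_value=0)
    # tensor([[1, 2, 3, 0, 0]])
    return padded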
def disable_dropout_in_model(model: torch.nn.Module) -> None:
for module in model.modules():
if isinstance(module, torch.nn.Dropout):
module.p = 0
def exact_div(a, b, a_str, b_str, custom_error_message=""):
q = a // b
if a != q * b:
raise ValueError(f"{custom_error_message}, {a_str}={a}, {b_str}={b}, inexact division: {a} / {b} = {a / b}")
return q
# copied from https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/stat_tracking.py#L5
class PerPromptStatTracker:
r"""
    Class for tracking statistics per prompt. Mainly used to calculate advantages for the DDPO algorithm
Args:
buffer_size (`int`):
Size of the buffer to keep for each prompt.
min_count (`int`):
Minimum number of samples to keep in the buffer before calculating the mean and std.
"""
def __init__(self, buffer_size, min_count):
self.buffer_size = buffer_size
self.min_count = min_count
self.stats = {}
def update(self, prompts, rewards):
prompts = np.array(prompts)
rewards = np.array(rewards)
unique = np.unique(prompts)
advantages = np.empty_like(rewards)
for prompt in unique:
prompt_rewards = rewards[prompts == prompt]
if prompt not in self.stats:
self.stats[prompt] = deque(maxlen=self.buffer_size)
self.stats[prompt].extend(prompt_rewards)
if len(self.stats[prompt]) < self.min_count:
mean = np.mean(rewards)
std = np.std(rewards) + 1e-6
else:
mean = np.mean(self.stats[prompt])
std = np.std(self.stats[prompt]) + 1e-6
advantages[prompts == prompt] = (prompt_rewards - mean) / std
return advantages
def get_stats(self):
return {k: {"mean": np.mean(v), "std": np.std(v), "count": len(v)} for k, v in self.stats.items()}
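# Example (illustrative sketch): per-prompt advantage computation as used by the DDPO trainer.
# The prompts and rewards are placeholders.
def _example_per_prompt_stat_tracker():
    tracker = PerPromptStatTracker(buffer_size=16, min_count=4)
    prompts = ["a photo of a cat", "a photo of a cat", "a photo of a dog", "a photo of a dog"]
    rewards = [1.0, 0.5, 0.2, 0.8]
    advantages = tracker.update(prompts, rewards)
    # until `min_count` rewards are buffered for a prompt, normalisation falls back to the whole batch
    return advantages, tracker.get_stats()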
def neftune_post_forward_hook(module, input, output):
"""
Implements the NEFTune forward pass for the model using forward hooks. Note this works only for
torch.nn.Embedding layers. This method is slightly adapted from the original source code
that can be found here: https://github.com/neelsjain/NEFTune
Simply add it to your model as follows:
```python
model = ...
model.embed_tokens.neftune_noise_alpha = 0.1
model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
```
Args:
module (`torch.nn.Module`):
The embedding module where the hook is attached. Note that you need to set
`module.neftune_noise_alpha` to the desired noise alpha value.
input (`torch.Tensor`):
The input tensor to the model.
output (`torch.Tensor`):
The output tensor of the model (i.e. the embeddings).
"""
if module.training:
dims = torch.tensor(output.size(1) * output.size(2))
mag_norm = module.neftune_noise_alpha / torch.sqrt(dims)
output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
return output
def peft_module_casting_to_bf16(model):
from peft.tuners.tuners_utils import BaseTunerLayer
for name, module in model.named_modules():
if isinstance(module, BaseTunerLayer):
module = module.to(torch.bfloat16)
elif isinstance(module, torch.nn.LayerNorm) or "norm" in name:
module = module.to(torch.float32)
elif any(x in name for x in ["lm_head", "embed_tokens", "wte", "wpe"]):
if hasattr(module, "weight"):
if module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
def trl_sanitze_kwargs_for_tagging(model, tag_names, kwargs=None):
if is_unsloth_available():
# Unsloth adds a new attribute in the model config `unsloth_version`
# to keep track of models that have been patched with unsloth.
if hasattr(model, "config") and getattr(model.config, "unsloth_version", None) is not None:
tag_names.append("unsloth")
if kwargs is not None:
if "tags" not in kwargs:
kwargs["tags"] = tag_names
elif "tags" in kwargs and isinstance(kwargs["tags"], list):
kwargs["tags"].extend(tag_names)
elif "tags" in kwargs and isinstance(kwargs["tags"], str):
tag_names.append(kwargs["tags"])
kwargs["tags"] = tag_names
return kwargs
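# Example (illustrative sketch): merging trainer tags with user-supplied hub kwargs before a push.
# `model` can be any (wrapped) transformers model; the tag values are placeholders.
def _example_tag_kwargs(model):
    kwargs = trl_sanitze_kwargs_for_tagging(model, tag_names=["trl", "sft"], kwargs={"tags": "my-experiment"})
    return kwargs["tags"]  # ["trl", "sft", "my-experiment"]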
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/ddpo_config.py | import os
import sys
import warnings
from dataclasses import dataclass, field
from typing import Literal, Optional
from ..core import flatten_dict
from ..import_utils import is_bitsandbytes_available, is_torchvision_available
@dataclass
class DDPOConfig:
"""
Configuration class for DDPOTrainer
"""
# common parameters
exp_name: str = os.path.basename(sys.argv[0])[: -len(".py")]
"""the name of this experiment (by default is the file name without the extension name)"""
run_name: Optional[str] = ""
"""Run name for wandb logging and checkpoint saving."""
seed: int = 0
"""Seed value for random generations"""
log_with: Optional[Literal["wandb", "tensorboard"]] = None
"""Log with either 'wandb' or 'tensorboard', check https://huggingface.co/docs/accelerate/usage_guides/tracking for more details"""
tracker_kwargs: dict = field(default_factory=dict)
"""Keyword arguments for the tracker (e.g. wandb_project)"""
accelerator_kwargs: dict = field(default_factory=dict)
"""Keyword arguments for the accelerator"""
project_kwargs: dict = field(default_factory=dict)
"""Keyword arguments for the accelerator project config (e.g. `logging_dir`)"""
tracker_project_name: str = "trl"
"""Name of project to use for tracking"""
logdir: str = "logs"
"""Top-level logging directory for checkpoint saving."""
# hyperparameters
num_epochs: int = 100
"""Number of epochs to train."""
save_freq: int = 1
"""Number of epochs between saving model checkpoints."""
num_checkpoint_limit: int = 5
"""Number of checkpoints to keep before overwriting old ones."""
mixed_precision: str = "fp16"
"""Mixed precision training."""
allow_tf32: bool = True
"""Allow tf32 on Ampere GPUs."""
resume_from: Optional[str] = ""
"""Resume training from a checkpoint."""
sample_num_steps: int = 50
"""Number of sampler inference steps."""
sample_eta: float = 1.0
"""Eta parameter for the DDIM sampler."""
sample_guidance_scale: float = 5.0
"""Classifier-free guidance weight."""
sample_batch_size: int = 1
"""Batch size (per GPU!) to use for sampling."""
sample_num_batches_per_epoch: int = 2
"""Number of batches to sample per epoch."""
train_batch_size: int = 1
"""Batch size (per GPU!) to use for training."""
train_use_8bit_adam: bool = False
"""Whether to use the 8bit Adam optimizer from bitsandbytes."""
train_learning_rate: float = 3e-4
"""Learning rate."""
train_adam_beta1: float = 0.9
"""Adam beta1."""
train_adam_beta2: float = 0.999
"""Adam beta2."""
train_adam_weight_decay: float = 1e-4
"""Adam weight decay."""
train_adam_epsilon: float = 1e-8
"""Adam epsilon."""
train_gradient_accumulation_steps: int = 1
"""Number of gradient accumulation steps."""
train_max_grad_norm: float = 1.0
"""Maximum gradient norm for gradient clipping."""
train_num_inner_epochs: int = 1
"""Number of inner epochs per outer epoch."""
train_cfg: bool = True
"""Whether or not to use classifier-free guidance during training."""
train_adv_clip_max: float = 5
"""Clip advantages to the range."""
train_clip_range: float = 1e-4
"""The PPO clip range."""
train_timestep_fraction: float = 1.0
"""The fraction of timesteps to train on."""
per_prompt_stat_tracking: bool = False
"""Whether to track statistics for each prompt separately."""
per_prompt_stat_tracking_buffer_size: int = 16
"""Number of reward values to store in the buffer for each prompt."""
per_prompt_stat_tracking_min_count: int = 16
"""The minimum number of reward values to store in the buffer."""
async_reward_computation: bool = False
"""Whether to compute rewards asynchronously."""
max_workers: int = 2
"""The maximum number of workers to use for async reward computation."""
negative_prompts: Optional[str] = ""
"""Comma-separated list of prompts to use as negative examples."""
def to_dict(self):
output_dict = {}
for key, value in self.__dict__.items():
output_dict[key] = value
return flatten_dict(output_dict)
def __post_init__(self):
if self.log_with not in ["wandb", "tensorboard"]:
warnings.warn(
("Accelerator tracking only supports image logging if `log_with` is set to 'wandb' or 'tensorboard'.")
)
if self.log_with == "wandb" and not is_torchvision_available():
warnings.warn("Wandb image logging requires torchvision to be installed")
if self.train_use_8bit_adam and not is_bitsandbytes_available():
raise ImportError(
"You need to install bitsandbytes to use 8bit Adam. "
"You can install it with `pip install bitsandbytes`."
)
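# Example (illustrative sketch): a small configuration for a quick local DDPO run.
# The values below are placeholders chosen for illustration, not recommended settings.
def _example_ddpo_config():
    config = DDPOConfig(
        num_epochs=2,
        sample_num_steps=20,
        sample_batch_size=1,
        sample_num_batches_per_epoch=1,
        train_batch_size=1,
        train_gradient_accumulation_steps=1,
        log_with="tensorboard",
        mixed_precision="no",
    )
    return config.to_dict()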
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/base.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from huggingface_hub import PyTorchModelHubMixin
class BaseTrainer(PyTorchModelHubMixin):
r"""
Base class for all trainers - this base class implements the basic functions that we
need for a trainer.
The trainer needs to have the following functions:
- step: takes in a batch of data and performs a step of training
- loss: takes in a batch of data and returns the loss
- compute_rewards: takes in a batch of data and returns the rewards
- _build_models_and_tokenizer: builds the models and tokenizer
- _build_dataset: builds the dataset
Each user is expected to implement their own trainer class that inherits from this base
if they want to use a new training algorithm.
"""
def __init__(self, config):
self.config = config
def step(self, *args):
raise NotImplementedError("Not implemented")
def loss(self, *args):
raise NotImplementedError("Not implemented")
def compute_rewards(self, *args):
raise NotImplementedError("Not implemented")
def _save_pretrained(self, save_directory):
raise NotImplementedError("Not implemented")
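# Example (illustrative sketch): the minimal shape of a custom trainer built on BaseTrainer.
# The class name and method bodies are placeholders.
class _ExampleTrainer(BaseTrainer):
    def step(self, batch):
        # run a forward/backward pass on `batch` and return training statistics
        return {}

    def loss(self, batch):
        return 0.0

    def compute_rewards(self, batch):
        return [0.0 for _ in batch]

    def _save_pretrained(self, save_directory):
        pass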
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/ppo_config.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import warnings
from dataclasses import dataclass, field
from typing import Literal, Optional
import numpy as np
import tyro
from typing_extensions import Annotated
from trl.trainer.utils import exact_div
from ..core import flatten_dict
from ..import_utils import is_wandb_available
JSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar="JSON", constructor=json.loads)]
@dataclass
class PPOConfig:
"""
Configuration class for PPOTrainer
"""
# common parameters
exp_name: str = os.path.basename(sys.argv[0])[: -len(".py")]
"""the name of this experiment (by default is the file name without the extension name)"""
seed: int = 0
"""Seed value for random generations"""
log_with: Optional[Literal["wandb", "tensorboard"]] = None
"""Log with either 'wandb' or 'tensorboard', check https://huggingface.co/docs/accelerate/usage_guides/tracking for more details"""
task_name: Optional[str] = None
"""Name of task to use - used only for tracking purposes"""
model_name: Optional[str] = None
"""Name of model to use - used only for tracking purposes"""
query_dataset: Optional[str] = None
"""Name of dataset to query - used only for tracking purposes"""
reward_model: Optional[str] = None
"""The reward model to use - used only for tracking purposes"""
remove_unused_columns: bool = True
"""Remove unused columns from the dataset if `datasets.Dataset` is used"""
tracker_kwargs: JSONDict = field(default_factory=dict)
"""Keyword arguments for the tracker (e.g. python ppo.py --ppo_config.tracker_kwargs='{"wandb": {"entity": "my_wandb_entity", "name": "my_exp_name"}}'"""
accelerator_kwargs: JSONDict = field(default_factory=dict)
"""Keyword arguments for the accelerator"""
project_kwargs: JSONDict = field(default_factory=dict)
"""Keyword arguments for the accelerator project config (e.g. `logging_dir`)"""
tracker_project_name: str = "trl"
"""Name of project to use for tracking"""
push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict)
"""Keyword arguments for pushing model to the hub during training (e.g. repo_id)"""
# hyperparameters
steps: int = 20000
"""Number of training steps"""
learning_rate: float = 1e-5
"""Adam learning rate"""
adap_kl_ctrl: bool = True
"""Use adaptive KL control, otherwise linear"""
init_kl_coef: Optional[float] = 0.2
"""Initial KL penalty coefficient (used for adaptive and linear control)"""
kl_penalty: Literal["kl", "abs", "mse", "full"] = "kl"
"""kl penalty options: 'kl': model_logp - ref_logp, 'abs': abs(kl), 'mse': mean squared error mse(kl) and 'full': the actual kl for all tokens in the distribution"""
target: Optional[float] = 6
"""Target KL value for adaptive KL control"""
horizon: Optional[float] = 10000
"""Horizon for adaptive KL control"""
gamma: float = 1
"""Gamma parameter for advantage calculation"""
lam: float = 0.95
"""Lambda parameter for advantage calculation"""
cliprange: float = 0.2
"""Range for clipping in PPO policy gradient loss"""
cliprange_value: float = 0.2
"""Range for clipping values in loss calculation"""
vf_coef: float = 0.1
"""Scaling factor for value loss"""
batch_size: int = 256
"""Number of samples per optimisation step"""
forward_batch_size: Optional[int] = None
"""DEPRECATED: use `mini_batch_size` instead, which does the same thing."""
mini_batch_size: int = 1
"""Number of samples optimized in each mini batch"""
gradient_accumulation_steps: int = 1
"""The number of gradient accumulation steps"""
world_size: tyro.conf.Suppress[int] = None
"""The world size for distributed training"""
ppo_epochs: int = 4
"""Number of optimisation epochs per batch of samples"""
max_grad_norm: Optional[float] = None
"""Maximum gradient norm for gradient clipping"""
optimize_cuda_cache: Optional[bool] = None
"""DEPRECATED: use `optimize_device_cache` instead, which does the same thing."""
optimize_device_cache: Optional[bool] = False
"""Optimize device cache for slightly more memory-efficient training"""
early_stopping: bool = False
"""Whether to stop the PPO optimization loop early is the KL too high"""
target_kl: float = 1
"""Stop early if we exceed this value by over 50%"""
compare_steps: int = 1
"""Number of steps between comparison of the current reward with the best seen so far"""
ratio_threshold: float = 10.0
"""Skip mini-batches with high PPO ratios that can cause loss spikes"""
use_score_scaling: bool = False
"""Use score scaling"""
use_score_norm: bool = False
"""Use score normalization. Only applicable if use_score_scaling is True"""
score_clip: Optional[float] = None
"""Score clipping"""
whiten_rewards: bool = False
"""Whiten the rewards before compute advantages"""
# computed hyperparameters at runtime; we use `tyro.conf.Suppress` to hide them from the help text
is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None
"""TO BE FILLED In RUNTIME: Whether the model is an encoder-decoder model"""
is_peft_model: Optional[tyro.conf.Suppress[bool]] = None
"""TO BE FILLED In RUNTIME: Whether the model is a PEFT model"""
backward_batch_size: tyro.conf.Suppress[int] = None
"""TO BE FILLED In RUNTIME: Number of samples optimized in an `optimizer.step()` call"""
global_backward_batch_size: tyro.conf.Suppress[int] = None
"""TO BE FILLED In RUNTIME: the effective `backward_batch_size` across all processes"""
global_batch_size: tyro.conf.Suppress[int] = None
"""TO BE FILLED In RUNTIME: the effective `batch_size` across all processes"""
    def __post_init__(self):
        # handle the deprecated `optimize_cuda_cache` alias on the instance
        if self.optimize_cuda_cache is not None:
            warnings.warn(
                "The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead."
            )
            self.optimize_device_cache = self.optimize_cuda_cache
if self.forward_batch_size is not None:
warnings.warn(
"Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization."
)
self.mini_batch_size = self.forward_batch_size
self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps
exact_div(
self.batch_size,
self.backward_batch_size,
"`batch_size`",
"`mini_batch_size * gradient_accumulation_steps`",
"`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`",
)
# check if wandb is installed
if self.log_with == "wandb":
# raise error if wandb is not installed
if not is_wandb_available():
raise ImportError(
"Please install wandb to use wandb logging. You can do this by running `pip install wandb`."
)
self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size))
assert self.kl_penalty in ["kl", "abs", "mse", "full"]
def to_dict(self):
output_dict = {}
for key, value in self.__dict__.items():
output_dict[key] = value
return flatten_dict(output_dict)
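# Example (illustrative sketch): how the batching hyperparameters relate. With a PPO batch of 256
# samples, mini batches of 4 and no gradient accumulation, each batch is optimised in 64 steps.
def _example_ppo_config_batching():
    config = PPOConfig(batch_size=256, mini_batch_size=4, gradient_accumulation_steps=1)
    # exact_div in __post_init__ guarantees batch_size % (mini_batch_size * gradient_accumulation_steps) == 0
    return config.backward_batch_size, config.total_ppo_epochs  # (4, 79)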
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/trainer/ddpo_trainer.py | # Copyright 2023 DDPO-pytorch authors (Kevin Black), metric-space, The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from collections import defaultdict
from concurrent import futures
from typing import Any, Callable, Optional, Tuple
from warnings import warn
import torch
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import whoami
from ..models import DDPOStableDiffusionPipeline
from . import BaseTrainer, DDPOConfig
from .utils import PerPromptStatTracker
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE = """---
license: apache-2.0
tags:
- trl
- ddpo
- diffusers
- reinforcement-learning
- text-to-image
- stable-diffusion
---
# {model_name}
This is a diffusion model that has been fine-tuned with reinforcement learning to
guide the model outputs according to a value function or human feedback. The model can be used for image generation conditioned with text.
"""
class DDPOTrainer(BaseTrainer):
"""
    The DDPOTrainer uses Denoising Diffusion Policy Optimization to optimise diffusion models.
Note, this trainer is heavily inspired by the work here: https://github.com/kvablack/ddpo-pytorch
As of now only Stable Diffusion based pipelines are supported
Attributes:
        **config** (`DDPOConfig`) -- Configuration object for DDPOTrainer. Check the documentation of `DDPOConfig` for more
details.
**reward_function** (Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor]) -- Reward function to be used
**prompt_function** (Callable[[], Tuple[str, Any]]) -- Function to generate prompts to guide model
**sd_pipeline** (`DDPOStableDiffusionPipeline`) -- Stable Diffusion pipeline to be used for training.
**image_samples_hook** (Optional[Callable[[Any, Any, Any], Any]]) -- Hook to be called to log images
"""
_tag_names = ["trl", "ddpo"]
def __init__(
self,
config: DDPOConfig,
reward_function: Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor],
prompt_function: Callable[[], Tuple[str, Any]],
sd_pipeline: DDPOStableDiffusionPipeline,
image_samples_hook: Optional[Callable[[Any, Any, Any], Any]] = None,
):
if image_samples_hook is None:
warn("No image_samples_hook provided; no images will be logged")
self.prompt_fn = prompt_function
self.reward_fn = reward_function
self.config = config
self.image_samples_callback = image_samples_hook
accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs)
if self.config.resume_from:
self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from))
if "checkpoint_" not in os.path.basename(self.config.resume_from):
# get the most recent checkpoint in this directory
checkpoints = list(
filter(
lambda x: "checkpoint_" in x,
os.listdir(self.config.resume_from),
)
)
if len(checkpoints) == 0:
raise ValueError(f"No checkpoints found in {self.config.resume_from}")
checkpoint_numbers = sorted([int(x.split("_")[-1]) for x in checkpoints])
self.config.resume_from = os.path.join(
self.config.resume_from,
f"checkpoint_{checkpoint_numbers[-1]}",
)
accelerator_project_config.iteration = checkpoint_numbers[-1] + 1
# number of timesteps within each trajectory to train on
self.num_train_timesteps = int(self.config.sample_num_steps * self.config.train_timestep_fraction)
self.accelerator = Accelerator(
log_with=self.config.log_with,
mixed_precision=self.config.mixed_precision,
project_config=accelerator_project_config,
# we always accumulate gradients across timesteps; we want config.train.gradient_accumulation_steps to be the
# number of *samples* we accumulate across, so we need to multiply by the number of training timesteps to get
# the total number of optimizer steps to accumulate across.
gradient_accumulation_steps=self.config.train_gradient_accumulation_steps * self.num_train_timesteps,
**self.config.accelerator_kwargs,
)
is_okay, message = self._config_check()
if not is_okay:
raise ValueError(message)
is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard"
if self.accelerator.is_main_process:
self.accelerator.init_trackers(
self.config.tracker_project_name,
config=dict(ddpo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(),
init_kwargs=self.config.tracker_kwargs,
)
logger.info(f"\n{config}")
set_seed(self.config.seed, device_specific=True)
self.sd_pipeline = sd_pipeline
self.sd_pipeline.set_progress_bar_config(
position=1,
disable=not self.accelerator.is_local_main_process,
leave=False,
desc="Timestep",
dynamic_ncols=True,
)
# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
if self.accelerator.mixed_precision == "fp16":
inference_dtype = torch.float16
elif self.accelerator.mixed_precision == "bf16":
inference_dtype = torch.bfloat16
else:
inference_dtype = torch.float32
self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype)
self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype)
self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype)
trainable_layers = self.sd_pipeline.get_trainable_layers()
self.accelerator.register_save_state_pre_hook(self._save_model_hook)
self.accelerator.register_load_state_pre_hook(self._load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if self.config.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
self.optimizer = self._setup_optimizer(
trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers
)
self.neg_prompt_embed = self.sd_pipeline.text_encoder(
self.sd_pipeline.tokenizer(
[""] if self.config.negative_prompts is None else self.config.negative_prompts,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=self.sd_pipeline.tokenizer.model_max_length,
).input_ids.to(self.accelerator.device)
)[0]
if config.per_prompt_stat_tracking:
self.stat_tracker = PerPromptStatTracker(
config.per_prompt_stat_tracking_buffer_size,
config.per_prompt_stat_tracking_min_count,
)
# NOTE: for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses
# more memory
self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast
if hasattr(self.sd_pipeline, "use_lora") and self.sd_pipeline.use_lora:
unet, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer)
self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters()))
else:
self.trainable_layers, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer)
if self.config.async_reward_computation:
self.executor = futures.ThreadPoolExecutor(max_workers=config.max_workers)
if config.resume_from:
logger.info(f"Resuming from {config.resume_from}")
self.accelerator.load_state(config.resume_from)
self.first_epoch = int(config.resume_from.split("_")[-1]) + 1
else:
self.first_epoch = 0
def compute_rewards(self, prompt_image_pairs, is_async=False):
if not is_async:
rewards = []
for images, prompts, prompt_metadata in prompt_image_pairs:
reward, reward_metadata = self.reward_fn(images, prompts, prompt_metadata)
rewards.append(
(
torch.as_tensor(reward, device=self.accelerator.device),
reward_metadata,
)
)
else:
rewards = self.executor.map(lambda x: self.reward_fn(*x), prompt_image_pairs)
rewards = [
(torch.as_tensor(reward.result(), device=self.accelerator.device), reward_metadata.result())
for reward, reward_metadata in rewards
]
return zip(*rewards)
def step(self, epoch: int, global_step: int):
"""
Perform a single step of training.
Args:
epoch (int): The current epoch.
global_step (int): The current global step.
Side Effects:
- Model weights are updated
- Logs the statistics to the accelerator trackers.
- If `self.image_samples_callback` is not None, it will be called with the prompt_image_pairs, global_step, and the accelerator tracker.
Returns:
global_step (int): The updated global step.
"""
samples, prompt_image_data = self._generate_samples(
iterations=self.config.sample_num_batches_per_epoch,
batch_size=self.config.sample_batch_size,
)
# collate samples into dict where each entry has shape (num_batches_per_epoch * sample.batch_size, ...)
samples = {k: torch.cat([s[k] for s in samples]) for k in samples[0].keys()}
rewards, rewards_metadata = self.compute_rewards(
prompt_image_data, is_async=self.config.async_reward_computation
)
for i, image_data in enumerate(prompt_image_data):
image_data.extend([rewards[i], rewards_metadata[i]])
if self.image_samples_callback is not None:
self.image_samples_callback(prompt_image_data, global_step, self.accelerator.trackers[0])
rewards = torch.cat(rewards)
rewards = self.accelerator.gather(rewards).cpu().numpy()
self.accelerator.log(
{
"reward": rewards,
"epoch": epoch,
"reward_mean": rewards.mean(),
"reward_std": rewards.std(),
},
step=global_step,
)
if self.config.per_prompt_stat_tracking:
# gather the prompts across processes
prompt_ids = self.accelerator.gather(samples["prompt_ids"]).cpu().numpy()
prompts = self.sd_pipeline.tokenizer.batch_decode(prompt_ids, skip_special_tokens=True)
advantages = self.stat_tracker.update(prompts, rewards)
else:
advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
# ungather advantages; keep the entries corresponding to the samples on this process
samples["advantages"] = (
torch.as_tensor(advantages)
.reshape(self.accelerator.num_processes, -1)[self.accelerator.process_index]
.to(self.accelerator.device)
)
del samples["prompt_ids"]
total_batch_size, num_timesteps = samples["timesteps"].shape
for inner_epoch in range(self.config.train_num_inner_epochs):
# shuffle samples along batch dimension
perm = torch.randperm(total_batch_size, device=self.accelerator.device)
samples = {k: v[perm] for k, v in samples.items()}
            # shuffle along the time dimension independently for each sample, so that the
            # timesteps of a trajectory are not always visited in the same order during training
perms = torch.stack(
[torch.randperm(num_timesteps, device=self.accelerator.device) for _ in range(total_batch_size)]
)
for key in ["timesteps", "latents", "next_latents", "log_probs"]:
samples[key] = samples[key][
torch.arange(total_batch_size, device=self.accelerator.device)[:, None],
perms,
]
original_keys = samples.keys()
original_values = samples.values()
# rebatch them as user defined train_batch_size is different from sample_batch_size
reshaped_values = [v.reshape(-1, self.config.train_batch_size, *v.shape[1:]) for v in original_values]
# Transpose the list of original values
transposed_values = zip(*reshaped_values)
# Create new dictionaries for each row of transposed values
samples_batched = [dict(zip(original_keys, row_values)) for row_values in transposed_values]
self.sd_pipeline.unet.train()
global_step = self._train_batched_samples(inner_epoch, epoch, global_step, samples_batched)
# ensure optimization step at the end of the inner epoch
if not self.accelerator.sync_gradients:
raise ValueError(
"Optimization step should have been performed by this point. Please check calculated gradient accumulation settings."
)
if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process:
self.accelerator.save_state()
return global_step
def calculate_loss(self, latents, timesteps, next_latents, log_probs, advantages, embeds):
"""
Calculate the loss for a batch of an unpacked sample
Args:
latents (torch.Tensor):
The latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width]
timesteps (torch.Tensor):
The timesteps sampled from the diffusion model, shape: [batch_size]
next_latents (torch.Tensor):
The next latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width]
log_probs (torch.Tensor):
The log probabilities of the latents, shape: [batch_size]
advantages (torch.Tensor):
The advantages of the latents, shape: [batch_size]
embeds (torch.Tensor):
The embeddings of the prompts, shape: [2*batch_size or batch_size, ...]
Note: the "or" is because if train_cfg is True, the expectation is that negative prompts are concatenated to the embeds
Returns:
loss (torch.Tensor), approx_kl (torch.Tensor), clipfrac (torch.Tensor)
(all of these are of shape (1,))
"""
with self.autocast():
if self.config.train_cfg:
noise_pred = self.sd_pipeline.unet(
torch.cat([latents] * 2),
torch.cat([timesteps] * 2),
embeds,
).sample
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.config.sample_guidance_scale * (
noise_pred_text - noise_pred_uncond
)
else:
noise_pred = self.sd_pipeline.unet(
latents,
timesteps,
embeds,
).sample
# compute the log prob of next_latents given latents under the current model
scheduler_step_output = self.sd_pipeline.scheduler_step(
noise_pred,
timesteps,
latents,
eta=self.config.sample_eta,
prev_sample=next_latents,
)
log_prob = scheduler_step_output.log_probs
advantages = torch.clamp(
advantages,
-self.config.train_adv_clip_max,
self.config.train_adv_clip_max,
)
ratio = torch.exp(log_prob - log_probs)
loss = self.loss(advantages, self.config.train_clip_range, ratio)
approx_kl = 0.5 * torch.mean((log_prob - log_probs) ** 2)
clipfrac = torch.mean((torch.abs(ratio - 1.0) > self.config.train_clip_range).float())
return loss, approx_kl, clipfrac
def loss(
self,
advantages: torch.Tensor,
clip_range: float,
ratio: torch.Tensor,
):
unclipped_loss = -advantages * ratio
clipped_loss = -advantages * torch.clamp(
ratio,
1.0 - clip_range,
1.0 + clip_range,
)
return torch.mean(torch.maximum(unclipped_loss, clipped_loss))
def _setup_optimizer(self, trainable_layers_parameters):
if self.config.train_use_8bit_adam:
import bitsandbytes
optimizer_cls = bitsandbytes.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
return optimizer_cls(
trainable_layers_parameters,
lr=self.config.train_learning_rate,
betas=(self.config.train_adam_beta1, self.config.train_adam_beta2),
weight_decay=self.config.train_adam_weight_decay,
eps=self.config.train_adam_epsilon,
)
def _save_model_hook(self, models, weights, output_dir):
self.sd_pipeline.save_checkpoint(models, weights, output_dir)
weights.pop() # ensures that accelerate doesn't try to handle saving of the model
def _load_model_hook(self, models, input_dir):
self.sd_pipeline.load_checkpoint(models, input_dir)
models.pop() # ensures that accelerate doesn't try to handle loading of the model
def _generate_samples(self, iterations, batch_size):
"""
Generate samples from the model
Args:
iterations (int): Number of iterations to generate samples for
batch_size (int): Batch size to use for sampling
Returns:
samples (List[Dict[str, torch.Tensor]]), prompt_image_pairs (List[List[Any]])
"""
samples = []
prompt_image_pairs = []
self.sd_pipeline.unet.eval()
sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1)
for _ in range(iterations):
prompts, prompt_metadata = zip(*[self.prompt_fn() for _ in range(batch_size)])
prompt_ids = self.sd_pipeline.tokenizer(
prompts,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=self.sd_pipeline.tokenizer.model_max_length,
).input_ids.to(self.accelerator.device)
prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0]
with self.autocast():
sd_output = self.sd_pipeline(
prompt_embeds=prompt_embeds,
negative_prompt_embeds=sample_neg_prompt_embeds,
num_inference_steps=self.config.sample_num_steps,
guidance_scale=self.config.sample_guidance_scale,
eta=self.config.sample_eta,
output_type="pt",
)
images = sd_output.images
latents = sd_output.latents
log_probs = sd_output.log_probs
latents = torch.stack(latents, dim=1) # (batch_size, num_steps + 1, ...)
log_probs = torch.stack(log_probs, dim=1) # (batch_size, num_steps, 1)
timesteps = self.sd_pipeline.scheduler.timesteps.repeat(batch_size, 1) # (batch_size, num_steps)
samples.append(
{
"prompt_ids": prompt_ids,
"prompt_embeds": prompt_embeds,
"timesteps": timesteps,
"latents": latents[:, :-1], # each entry is the latent before timestep t
"next_latents": latents[:, 1:], # each entry is the latent after timestep t
"log_probs": log_probs,
"negative_prompt_embeds": sample_neg_prompt_embeds,
}
)
prompt_image_pairs.append([images, prompts, prompt_metadata])
return samples, prompt_image_pairs
def _train_batched_samples(self, inner_epoch, epoch, global_step, batched_samples):
"""
Train on a batch of samples. Main training segment
Args:
inner_epoch (int): The current inner epoch
epoch (int): The current epoch
global_step (int): The current global step
batched_samples (List[Dict[str, torch.Tensor]]): The batched samples to train on
Side Effects:
- Model weights are updated
- Logs the statistics to the accelerator trackers.
Returns:
global_step (int): The updated global step
"""
info = defaultdict(list)
for i, sample in enumerate(batched_samples):
if self.config.train_cfg:
# concat negative prompts to sample prompts to avoid two forward passes
embeds = torch.cat([sample["negative_prompt_embeds"], sample["prompt_embeds"]])
else:
embeds = sample["prompt_embeds"]
for j in range(self.num_train_timesteps):
with self.accelerator.accumulate(self.sd_pipeline.unet):
loss, approx_kl, clipfrac = self.calculate_loss(
sample["latents"][:, j],
sample["timesteps"][:, j],
sample["next_latents"][:, j],
sample["log_probs"][:, j],
sample["advantages"],
embeds,
)
info["approx_kl"].append(approx_kl)
info["clipfrac"].append(clipfrac)
info["loss"].append(loss)
self.accelerator.backward(loss)
if self.accelerator.sync_gradients:
self.accelerator.clip_grad_norm_(
self.trainable_layers.parameters()
if not isinstance(self.trainable_layers, list)
else self.trainable_layers,
self.config.train_max_grad_norm,
)
self.optimizer.step()
self.optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if self.accelerator.sync_gradients:
# log training-related stuff
info = {k: torch.mean(torch.stack(v)) for k, v in info.items()}
info = self.accelerator.reduce(info, reduction="mean")
info.update({"epoch": epoch, "inner_epoch": inner_epoch})
self.accelerator.log(info, step=global_step)
global_step += 1
info = defaultdict(list)
return global_step
def _config_check(self) -> Tuple[bool, str]:
samples_per_epoch = (
self.config.sample_batch_size * self.accelerator.num_processes * self.config.sample_num_batches_per_epoch
)
total_train_batch_size = (
self.config.train_batch_size
* self.accelerator.num_processes
* self.config.train_gradient_accumulation_steps
)
if not self.config.sample_batch_size >= self.config.train_batch_size:
return (
False,
f"Sample batch size ({self.config.sample_batch_size}) must be greater than or equal to the train batch size ({self.config.train_batch_size})",
)
if not self.config.sample_batch_size % self.config.train_batch_size == 0:
return (
False,
f"Sample batch size ({self.config.sample_batch_size}) must be divisible by the train batch size ({self.config.train_batch_size})",
)
if not samples_per_epoch % total_train_batch_size == 0:
return (
False,
f"Number of samples per epoch ({samples_per_epoch}) must be divisible by the total train batch size ({total_train_batch_size})",
)
return True, ""
def train(self, epochs: Optional[int] = None):
"""
Train the model for a given number of epochs
"""
global_step = 0
if epochs is None:
epochs = self.config.num_epochs
for epoch in range(self.first_epoch, epochs):
global_step = self.step(epoch, global_step)
def create_model_card(self, path: str, model_name: Optional[str] = "TRL DDPO Model") -> None:
"""Creates and saves a model card for a TRL model.
Args:
path (`str`): The path to save the model card to.
model_name (`str`, *optional*): The name of the model, defaults to `TRL DDPO Model`.
"""
try:
user = whoami()["name"]
# handle the offline case
except: # noqa
warnings.warn("Cannot retrieve user information assuming you are running in offline mode.")
return
if not os.path.exists(path):
os.makedirs(path)
model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}")
with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f:
f.write(model_card_content)
def _save_pretrained(self, save_directory):
self.sd_pipeline.save_pretrained(save_directory)
self.create_model_card(save_directory)
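# Example (illustrative sketch): wiring a DDPOTrainer together with the DefaultDDPOStableDiffusionPipeline
# wrapper from trl.models. The prompt and reward functions below are toy placeholders standing in for a
# real prompt sampler and image scorer; running this downloads the Stable Diffusion checkpoint.
def _example_ddpo_training_loop():
    import numpy as np

    from ..models import DefaultDDPOStableDiffusionPipeline

    def prompt_fn():
        return "a photo of a cat", {}

    def reward_fn(images, prompts, prompt_metadata):
        # a real reward function would score the generated images (e.g. with an aesthetic model)
        return np.ones(len(prompts)), {}

    pipeline = DefaultDDPOStableDiffusionPipeline("runwayml/stable-diffusion-v1-5", use_lora=True)
    config = DDPOConfig(
        num_epochs=1,
        sample_num_batches_per_epoch=1,
        sample_batch_size=1,
        train_batch_size=1,
    )
    trainer = DDPOTrainer(config, reward_fn, prompt_fn, pipeline)
    trainer.train()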
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/extras/best_of_n_sampler.py | from typing import Any, Callable, List, Optional, Union
import torch
from transformers import GenerationConfig, PreTrainedTokenizer, PreTrainedTokenizerFast
from ..core import set_seed
from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper
class BestOfNSampler(object):
def __init__(
self,
model: PreTrainedModelWrapper,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
queries_to_scores: Callable[[List[str]], List[float]],
length_sampler: Any,
sample_size: int = 4,
seed: Optional[int] = None,
n_candidates: int = 1,
generation_config: Optional[GenerationConfig] = None,
) -> None:
r"""
Initialize the sampler for best-of-n generation
Args:
model (`PreTrainedModelWrapper`):
The pretrained model to use for generation
tokenizer (`PreTrainedTokenizer` or `PreTrainedTokenizerFast`):
Tokenizer associated with the pretrained model
queries_to_scores (`Callable[[List[str]], List[float]]`):
Callable that takes a list of generated texts and returns the associated reward scores
length_sampler (`Any`):
Sampler used to sample the length of the generated text
sample_size (`int`):
Number of samples to generate for each query
seed (`int`, *optional*):
Random seed used to control generation
n_candidates (`int`):
Number of candidates to return for each query
generation_config (`GenerationConfig`, *optional*):
Generation config passed to the underlying model's `generate` method.
See `GenerationConfig` (https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/text_generation#transformers.GenerationConfig) for more details
"""
if seed is not None:
set_seed(seed)
if not isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)):
raise ValueError(
f"tokenizer must be a PreTrainedTokenizer or PreTrainedTokenizerFast, got {type(tokenizer)}"
)
if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
raise ValueError(
f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
)
self.model = model
self.tokenizer = tokenizer
self.queries_to_scores = queries_to_scores
self.length_sampler = length_sampler
self.gen_config = generation_config
self.sample_size = sample_size
self.n_candidates = n_candidates
def generate(
self,
tokenized_query: Union[List[int], torch.Tensor, List[torch.Tensor], List[List[int]]],
skip_special_tokens: bool = True,
device: Optional[Union[str, torch.device]] = None,
**generation_kwargs,
) -> List[List[str]]:
r"""
Generate the best of n samples for input queries
Args:
            tokenized_query (`List[int]` or `torch.Tensor` or `List[torch.Tensor]` or `List[List[int]]`):
represents either a single tokenized query (a single tensor or a list of integers) or a batch of tokenized queries (a list of tensors or a list of lists of integers)
skip_special_tokens (`bool`):
Whether to remove the special tokens from the output
device (`str` or `torch.device`, *optional*):
The device on which the model will be loaded
**generation_kwargs (`dict`, *optional*):
Additional keyword arguments passed along to the underlying model's `generate` method.
This is used to override generation config
Returns:
List[List[str]]: A list of lists of generated texts
"""
queries = None
if isinstance(tokenized_query, torch.Tensor) and tokenized_query.ndim == 1:
queries = tokenized_query.unsqueeze(0)
elif isinstance(tokenized_query, List):
element_type = type(tokenized_query[0])
if element_type == int:
queries = torch.tensor(tokenized_query).unsqueeze(0)
elif element_type == torch.Tensor:
queries = [tensor.reshape((1, -1)) for tensor in tokenized_query]
else:
queries = [torch.tensor(query).reshape((1, -1)) for query in tokenized_query]
result = []
for query in queries:
            expanded_query = query.repeat((self.sample_size, 1))
            output = self.model.generate(
                expanded_query.to(device),
max_new_tokens=self.length_sampler(),
generation_config=self.gen_config,
**generation_kwargs,
).squeeze()
output = self.tokenizer.batch_decode(output, skip_special_tokens=skip_special_tokens)
scores = torch.tensor(self.queries_to_scores(output))
output = [output[i] for i in scores.topk(self.n_candidates).indices]
result.append(output)
return result
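# Example (illustrative sketch): best-of-n sampling with a value-head model. The "gpt2" checkpoint is a
# placeholder and the scoring function is a stand-in for a real reward model.
def _example_best_of_n():
    from transformers import AutoTokenizer

    from ..core import LengthSampler
    from ..models import AutoModelForCausalLMWithValueHead

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")

    def queries_to_scores(texts):
        # toy reward: longer continuations score higher
        return [float(len(text)) for text in texts]

    sampler = BestOfNSampler(
        model,
        tokenizer,
        queries_to_scores,
        length_sampler=LengthSampler(8, 16),
        sample_size=4,
        n_candidates=1,
    )
    query = tokenizer("The weather today is", return_tensors="pt").input_ids.squeeze(0)
    return sampler.generate(query, device="cpu")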
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/extras/__init__.py | # flake8: noqa
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .best_of_n_sampler import BestOfNSampler
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/extras/dataset_formatting.py | import logging
from typing import Callable, Literal, Optional, Union
from datasets import Dataset, Value
from transformers import AutoTokenizer
from ..trainer.utils import ConstantLengthDataset
FORMAT_MAPPING = {
"chatml": [{"content": Value(dtype="string", id=None), "role": Value(dtype="string", id=None)}],
"instruction": {"completion": Value(dtype="string", id=None), "prompt": Value(dtype="string", id=None)},
}
def conversations_formatting_function(tokenizer: AutoTokenizer, messages_field: Literal["messages", "conversations"]):
r"""
    Return a callable that takes a "messages"-style dataset and formats it by applying the tokenizer's
    chat template to each conversation.
"""
def format_dataset(examples):
if isinstance(examples[messages_field][0], list):
output_texts = []
for i in range(len(examples[messages_field])):
output_texts.append(tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False))
return output_texts
else:
return tokenizer.apply_chat_template(examples[messages_field], tokenize=False)
return format_dataset
def instructions_formatting_function(tokenizer: AutoTokenizer):
r"""
    Return a callable that takes an "instructions"-style dataset (prompt/completion pairs) and formats it by
    applying the tokenizer's chat template.
"""
def format_dataset(examples):
if isinstance(examples["prompt"], list):
output_texts = []
for i in range(len(examples["prompt"])):
converted_sample = [
{"role": "user", "content": examples["prompt"][i]},
{"role": "assistant", "content": examples["completion"][i]},
]
output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False))
return output_texts
else:
converted_sample = [
{"role": "user", "content": examples["prompt"]},
{"role": "assistant", "content": examples["completion"]},
]
return tokenizer.apply_chat_template(converted_sample, tokenize=False)
return format_dataset
def get_formatting_func_from_dataset(
dataset: Union[Dataset, ConstantLengthDataset], tokenizer: AutoTokenizer
) -> Optional[Callable]:
r"""
Finds the correct formatting function based on the dataset structure. Currently supported datasets are:
- `ChatML` with [{"role": str, "content": str}]
- `instruction` with [{"prompt": str, "completion": str}]
Args:
dataset (Dataset): User dataset
tokenizer (AutoTokenizer): Tokenizer used for formatting
Returns:
Callable: Formatting function if the dataset format is supported else None
"""
if isinstance(dataset, Dataset):
if "messages" in dataset.features:
if dataset.features["messages"] == FORMAT_MAPPING["chatml"]:
logging.info("Formatting dataset with chatml format")
return conversations_formatting_function(tokenizer, "messages")
if "conversations" in dataset.features:
if dataset.features["conversations"] == FORMAT_MAPPING["chatml"]:
logging.info("Formatting dataset with chatml format")
return conversations_formatting_function(tokenizer, "conversations")
elif dataset.features == FORMAT_MAPPING["instruction"]:
logging.info("Formatting dataset with instruction format")
return instructions_formatting_function(tokenizer)
return None
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/environment/__init__.py | # flake8: noqa
from .base_environment import TextEnvironment, TextHistory
| 0 |
hf_public_repos/trl/trl | hf_public_repos/trl/trl/environment/base_environment.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
import torch
from accelerate.utils import extract_model_from_parallel
from transformers import StoppingCriteria, StoppingCriteriaList
from ..import_utils import is_rich_available
if is_rich_available():
from rich import print
from rich.text import Text
class StringStoppingCriteria(StoppingCriteria):
"""Custom `StoppingCriteria` which checks if all generations in the batch are completed."""
def __init__(self, stop_strings, tokenizer):
self.stop_strings = stop_strings
self.tokenizer = tokenizer
self.first_call = True
def __call__(self, input_ids, scores, **kwargs):
"""Returns true if all generated sequences contain any of the stop strings."""
if self.first_call:
self.generated_tokens = [1 for _ in range(input_ids.shape[0])]
self.start_length = input_ids.shape[-1] - 1
self.first_call = False
decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
done = []
for i, decoded_generation in enumerate(decoded_generations):
sequence_complete = any([stop_string in decoded_generation for stop_string in self.stop_strings])
done.append(sequence_complete)
if not sequence_complete:
self.generated_tokens[i] += 1
if all(done):
self.first_call = True
return all(done)
class TextHistory:
"""The TextHistory class keeps track of the history of an interaction between the language model and the environment."""
def __init__(self, text, tokens, system=True):
"""
Initialize TextHistory.
        Args:
text (`str`): The text of the first segment.
tokens (`torch.LongTensor`): The tokens of the first segment.
system (`bool`, *optional*): Whether the first segment is a system or user segment.
"""
self.system_spans = []
self.text_spans = []
self.token_spans = []
self.token_masks = torch.tensor([], dtype=torch.long).to(tokens.device)
self.text = ""
self.tokens = torch.tensor([], dtype=torch.long).to(tokens.device)
self.completed = False
self.truncated = False
self.reward = 0.0
self.prompt_color = "black on grey85"
self.system_color = "black on cyan3"
self.model_color = "black on deep_sky_blue1"
self.reward_color = "black on plum1"
self.append_segment(text, tokens, system=system)
def append_segment(self, text, tokens, system=True):
"""
Append a new segment to the history.
        Args:
text (`str`): The text of the new segment.
tokens (`torch.LongTensor`): The tokens of the new segment.
system (`bool`, *optional*): Whether the new segment is a system or user segment.
"""
if len(text) == 0 or len(tokens) == 0:
raise ValueError("Can't append empty text or token list to history.")
original_text_length = len(self.text)
self.text += text
self.text_spans.append((original_text_length, len(self.text)))
self.system_spans.append(system)
original_token_length = len(self.tokens)
self.tokens = torch.cat((self.tokens, tokens))
if system:
self.token_masks = torch.cat((self.token_masks, torch.zeros_like(tokens)))
else:
self.token_masks = torch.cat((self.token_masks, torch.ones_like(tokens)))
self.token_spans.append((original_token_length, len(self.tokens)))
def complete(self, truncated=False):
"""
Mark the history as completed.
"""
self.completed = True
self.truncated = truncated
@property
def last_text_segment(self):
"""
Get the last text segment.
"""
start, end = self.text_spans[-1]
return self.text[start:end]
def split_query_response_tokens(self):
"""
Split the tokens into query and response tokens.
"""
split_index = self.token_spans[0][1]
query = self.tokens[:split_index]
response = self.tokens[split_index:]
mask = self.token_masks[split_index:]
return query, response, mask
def show_text(self, show_legend=False):
"""
Print the text history.
"""
if not is_rich_available():
warnings.warn("install rich to display text")
return
text = Text(self.text)
text.stylize(self.prompt_color, self.text_spans[0][0], self.text_spans[1][0])
for i, (start, end) in enumerate(self.text_spans[1:]):
if self.system_spans[i + 1]:
text.stylize(self.system_color, start, end)
else:
text.stylize(self.model_color, start, end)
text.append(f"\n\nReward: {self.reward}", style=self.reward_color)
print(text)
if show_legend:
self.show_colour_legend()
def show_tokens(self, tokenizer, show_legend=False):
"""
Print the history tokens.
"""
if not is_rich_available():
warnings.warn("install rich to display tokens")
return
text = Text()
prompt_end = self.token_spans[0][1]
for i, (token, mask) in enumerate(zip(self.tokens, self.token_masks)):
if i < prompt_end:
text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.prompt_color)
text.append(" ")
elif mask == 0:
text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.system_color)
text.append(" ")
else:
text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.model_color)
text.append(" ")
text.append(f"\n\nReward: {self.reward}", style=self.reward_color)
print(text)
if show_legend:
self.show_colour_legend()
def show_colour_legend(self):
"""
Print the colour legend.
"""
if not is_rich_available():
warnings.warn("install rich to display colour legend")
return
text = Text("\n\n(Colour Legend: ")
text.append("Prompt", style=self.prompt_color)
text.append("|")
text.append("System", style=self.system_color)
text.append("|")
text.append("Model", style=self.model_color)
text.append("|")
text.append("Reward", style=self.reward_color)
text.append(")")
print(text)
class TextEnvironment:
"""
    The TextEnvironment enables interaction of an LLM with an environment using tools.
"""
def __init__(
self,
model=None,
tokenizer=None,
tools=None,
reward_fn=None,
prompt=None,
max_turns=4,
        max_tool_response=100,
max_length=None,
generation_kwargs=None,
):
"""
Initialize TextEnvironment.
Args:
model (`PreTrainedModelWrapper`): The model to use for generation.
tokenizer (`transformers.PreTrainedTokenizer`): The tokenizer to use for generation.
tools (list): A list of tools to use for interaction.
reward_fn (function): A function that takes a string and returns a reward.
prompt (str): The base prompt to use for generation. Is prepended to the tasks.
max_turns (Optional[int]): The maximum number of turns to allow.
max_tool_response (Optional[int]): The maximum number of characters to allow in a tool response.
max_length (Optional[int]): The maximum number of tokens to allow in an episode.
generation_kwargs (Optional[dict]): A dictionary of keyword arguments to pass to the model's generate method.
"""
self.model = model
self.tokenizer = tokenizer
self.prompt = prompt
if isinstance(tools, dict):
self.tools = tools
else:
self.tools = dict([(tool.__class__.__name__, tool) for tool in tools])
self.reward_fn = reward_fn
self.max_length = max_length
self.request_token = "<request>"
self.call_token = "<call>"
self.response_token = "<response>"
self.submit_token = "<submit>"
self.max_turns = max_turns
        self.max_tool_response = max_tool_response
if generation_kwargs is None:
self.generation_kwargs = dict()
else:
self.generation_kwargs = generation_kwargs
self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder")
self.current_device = extract_model_from_parallel(self.model).pretrained_model.device
def run(self, queries, **rewards_kwargs):
"""
Run the environment on a list of queries.
Args:
queries (list[str]): A list of queries to run the model in the environment on.
"""
turns = 0
queries = [self.prompt + task for task in queries]
queries_tokens = [
self.tokenizer(query, return_tensors="pt").input_ids[0].to(self.model.pretrained_model.device)
for query in queries
]
histories = [TextHistory(q, qt, system=True) for q, qt in zip(queries, queries_tokens)]
while any([not history.completed for history in histories]) and turns < self.max_turns:
histories = self.generate(histories)
histories = self.tasks_end_check(histories)
# TODO: make this parallel rather than for-loop
for i in range(len(histories)):
histories[i] = self.step(histories[i])
histories = self.tasks_end_check(histories, model_turn=False)
turns += 1
self.compute_reward(histories, **rewards_kwargs)
# convert a list of (q, r, m) tuples to lists of all qs, rs, and ms respectively
queries, responses, masks = map(list, zip(*[history.split_query_response_tokens() for history in histories]))
rewards = [history.reward for history in histories]
return queries, responses, masks, rewards, histories
def step(self, history):
"""
Step the environment forward one turn.
Args:
history (`TextHistory`): The history to step forward.
"""
truncated, ended = self.task_end_check(history)
if ended:
history.complete(truncated=truncated)
if history.completed:
return history
tool, query = self.parse_tool_call(history.last_text_segment)
if tool is None or query is None:
response = f"Unknown tool call: {history.last_text_segment}"
else:
            if tool not in self.tools:
                response = f"Unknown tool {tool}."
            else:
                try:
                    response = self.tools[tool](query)
                except Exception as error:
                    response = f"Tool error: {str(error)}"
if len(response) > self.max_tool_response:
response = response[: (self.max_tool_response - 3)] + "..."
history.append_segment(
response + self.response_token,
self.tokenizer(response + self.response_token, return_tensors="pt")
.input_ids[0]
.to(self.model.pretrained_model.device),
system=True,
)
return history
def parse_tool_call(self, text):
"""
Parse request string. Expected format: <request><tool_name>query<call>
"""
result = re.search(f"(?<={self.request_token}).*?(?={self.call_token})", text, re.DOTALL)
# if we can't find a <request>/<call> span we return none
if result is None:
return None, None
else:
extracted_text = result.group()
result = re.search(r"<(.*?)>", extracted_text)
# if we can't find a tool name we return none
if result is None:
return None, None
else:
tool = result.group(1)
# split off the tool name
query = ">".join(extracted_text.split(">")[1:])
return tool, query
def compute_reward(self, histories, **reward_kwargs):
"""
Compute the reward for a list of histories.
"""
rewards = self.reward_fn([history.last_text_segment for history in histories], **reward_kwargs)
for history, reward in zip(histories, rewards):
history.reward = reward
return histories
def generate(self, histories):
"""
Generate responses for a list of histories.
"""
active_histories = [i for i, history in enumerate(histories) if not history.completed]
query_tensors = [histories[i].tokens for i in active_histories]
response_tensors = self._generate_batched(query_tensors)
response_texts = self.tokenizer.batch_decode(response_tensors)
for i, response_text, response_tensor in zip(active_histories, response_texts, response_tensors):
histories[i].append_segment(response_text, response_tensor, system=False)
return histories
def tasks_end_check(self, histories, model_turn=True):
"""
Check if the current generation sequences have finished.
"""
for history in histories:
if not history.completed:
truncated, ended = self.task_end_check(history, model_turn=model_turn)
if ended:
history.complete(truncated=truncated)
return histories
def task_end_check(self, history, model_turn=True):
"""
Check if the current generation sequence has finished.
"""
truncated = False
ended = False
if history.completed:
return truncated, ended
        if self.max_length is not None and len(self.tokenizer(history.text).input_ids) > self.max_length:
truncated = True
ended = True
elif self.tokenizer.eos_token in history.text:
ended = True
elif model_turn and not (
(self.request_token in history.last_text_segment and self.call_token in history.last_text_segment)
or self.submit_token in history.last_text_segment
):
ended = True
elif self.submit_token in history.last_text_segment:
ended = True
return truncated, ended
def _generate_batched(
self,
query_tensors,
batch_size: int = 16,
pad_to_multiple_of: int = None,
):
"""
Generate responses for a list of query tensors.
        Args:
query_tensors (list[torch.Tensor]): A list of query tensors to generate responses for.
batch_size (int): The batch size to use for generation.
pad_to_multiple_of (int): The padding length to use for generation.
"""
outputs = []
padding_side_default = self.tokenizer.padding_side
if not self.is_encoder_decoder:
self.tokenizer.padding_side = "left"
# in case we have fewer examples than bs
batch_size = min(len(query_tensors), batch_size)
for i in range(0, len(query_tensors), batch_size):
# prevent overflow if query tensors are not even multiple of bs
end_index = min(len(query_tensors), i + batch_size)
batch = query_tensors[i:end_index]
batch_mask = [torch.ones_like(element) for element in batch]
inputs = {"input_ids": batch, "attention_mask": batch_mask}
padded_inputs = self.tokenizer.pad(
inputs,
padding=True,
max_length=None,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
).to(self.current_device)
stopping_criteria = StringStoppingCriteria([self.call_token, self.submit_token], self.tokenizer)
self.generation_kwargs["stopping_criteria"] = StoppingCriteriaList([stopping_criteria])
generations = extract_model_from_parallel(self.model).generate(**padded_inputs, **self.generation_kwargs)
for generation, mask, generated_tokens in zip(
generations, padded_inputs["attention_mask"], stopping_criteria.generated_tokens
):
if not self.is_encoder_decoder:
output = generation[(1 - mask).sum() :] # remove padding
else:
output = generation
if not self.is_encoder_decoder:
output = output[(mask).sum() :] # remove prompt
# remove chunk generated after stopping criteria in batch mode
outputs.append(output[:generated_tokens])
self.tokenizer.padding_side = padding_side_default
return outputs
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/multi_adapter_rl.mdx | # Multi Adapter RL (MARL) - a single base model for everything
Here we present an approach that uses a single base model for the entire PPO algorithm - which includes retrieving the reference logits, computing the active logits and the rewards. This feature is experimental as we have not tested the convergence of the approach. We encourage the community to let us know if they run into any issues.
## Requirements
You just need to install `peft` and, optionally, `bitsandbytes` if you want to use 8-bit base models for more memory-efficient fine-tuning.
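For example:
```bash
pip install peft
pip install bitsandbytes  # optional, only needed for 8-bit / 4-bit base models
```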
## Summary
This approach involves three stages, which we summarize as follows:
1- Train a base model on the target domain (e.g. `imdb` dataset) - this is the Supervised Fine Tuning stage - it can leverage the `SFTTrainer` from TRL.
2- Train a reward model using `peft`. This is required in order to re-use the adapter during the RL optimisation process (step 3 below). We show an example of leveraging the `RewardTrainer` from TRL in [this example](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py)
3- Fine tune new adapters on the base model using PPO and the reward adapter. ("0 abstraction RL")
Make sure to use the same model (i.e. same architecture and same weights) for stages 2 & 3.
## Quickstart
Let us assume you have trained your reward adapter on `llama-7b` model using `RewardTrainer` and pushed the weights on the hub under `trl-lib/llama-7b-hh-rm-adapter`.
When doing PPO, before passing the model to `PPOTrainer` create your model as follows:
```python
model_name = "huggyllama/llama-7b"
rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter"
# PPO adapter
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = AutoModelForCausalLMWithValueHead.from_pretrained(
model_name,
peft_config=lora_config,
reward_adapter=rm_adapter_id,
)
...
trainer = PPOTrainer(
model=model,
...
)
...
```
Then inside your PPO training loop, call the `compute_reward_score` method by accessing the `model` attribute of the `PPOTrainer`.
```python
rewards = trainer.model.compute_reward_score(**inputs)
```
## Advanced usage
### Control on the adapter name
If you are familiar with the `peft` library, you know that you can use multiple adapters inside the same model. For example, you can train multiple adapters on the same base model to fine-tune on different policies.
In this case, you may want to control which adapter is re-activated after retrieving the reward. To do so, simply pass the appropriate adapter name via the `ppo_adapter_name` argument when calling `compute_reward_score`.
```python
adapter_name_policy_1 = "policy_1"
rewards = trainer.model.compute_reward_score(**inputs, ppo_adapter_name=adapter_name_policy_1)
...
```
### Using 4-bit and 8-bit base models
For more memory efficient fine-tuning, you can load your base model in 8-bit or 4-bit while keeping the adapters in the default precision (float32).
Just pass the appropriate arguments (i.e. `load_in_8bit=True` or `load_in_4bit=True`) to `AutoModelForCausalLMWithValueHead.from_pretrained` as follows (assuming you have installed `bitsandbytes`):
```python
model_name = "llama-7b"
rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter"
# PPO adapter
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = AutoModelForCausalLMWithValueHead.from_pretrained(
model_name,
peft_config=lora_config,
reward_adapter=rm_adapter_id,
load_in_8bit=True,
)
...
trainer = PPOTrainer(
model=model,
...
)
...
``` | 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/customization.mdx | # Training customization
TRL is designed with modularity in mind so that users are able to efficiently customize the training loop for their needs. Below are some examples of how you can apply and test different techniques.
## Train on multiple GPUs / nodes
The trainers in TRL use 🤗 Accelerate to enable distributed training across multiple GPUs or nodes. To do so, first create an 🤗 Accelerate config file by running
```bash
accelerate config
```
and answering the questions according to your multi-gpu / multi-node setup. You can then launch distributed training by running:
```bash
accelerate launch your_script.py
```
We also provide config files in the [examples folder](https://github.com/huggingface/trl/tree/main/examples/accelerate_configs) that can be used as templates. To use these templates, simply pass the path to the config file when launching a job, e.g.:
```shell
accelerate launch --config_file=examples/accelerate_configs/multi_gpu.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script
```
Refer to the [examples page](https://github.com/huggingface/trl/tree/main/examples) for more details.
### Distributed training with DeepSpeed
All of the trainers in TRL can be run on multiple GPUs together with DeepSpeed ZeRO-{1,2,3} for efficient sharding of the optimizer states, gradients, and model weights. To do so, run:
```shell
accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero{1,2,3}.yaml --num_processes {NUM_GPUS} path_to_your_script.py --all_arguments_of_the_script
```
Note that for ZeRO-3, a small tweak is needed to initialize your reward model on the correct device via the `zero3_init_context_manager()` context manager. In particular, this is needed to avoid DeepSpeed hanging after a fixed number of training steps. Here is a snippet of what is involved from the [`sentiment_tuning`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) example:
```python
ds_plugin = ppo_trainer.accelerator.state.deepspeed_plugin
if ds_plugin is not None and ds_plugin.is_zero3_init_enabled():
with ds_plugin.zero3_init_context_manager(enable=False):
sentiment_pipe = pipeline("sentiment-analysis", model="lvwerra/distilbert-imdb", device=device)
else:
sentiment_pipe = pipeline("sentiment-analysis", model="lvwerra/distilbert-imdb", device=device)
```
Consult the 🤗 Accelerate [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more information about the DeepSpeed plugin.
## Use different optimizers
By default, the `PPOTrainer` creates a `torch.optim.Adam` optimizer. You can create and define a different optimizer and pass it to `PPOTrainer`:
```python
import torch
from transformers import GPT2Tokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
model_ref = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# 2. define config
ppo_config = {'batch_size': 1, 'learning_rate':1e-5}
config = PPOConfig(**ppo_config)
# 2. Create optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate)
# 3. initialize trainer
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer, optimizer=optimizer)
```
For memory efficient fine-tuning, you can also pass `Adam8bit` optimizer from `bitsandbytes`:
```python
import torch
import bitsandbytes as bnb
from transformers import GPT2Tokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
model_ref = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# 2. define config
ppo_config = {'batch_size': 1, 'learning_rate':1e-5}
config = PPOConfig(**ppo_config)
# 2. Create optimizer
optimizer = bnb.optim.Adam8bit(model.parameters(), lr=config.learning_rate)
# 3. initialize trainer
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer, optimizer=optimizer)
```
### Use LION optimizer
You can use the new [LION optimizer from Google](https://arxiv.org/abs/2302.06675) as well. First, take the source code of the optimizer definition [here](https://github.com/lucidrains/lion-pytorch/blob/main/lion_pytorch/lion_pytorch.py) and copy it so that you can import the optimizer. Make sure to initialize the optimizer by considering the trainable parameters only for more memory-efficient training:
```python
optimizer = Lion(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.config.learning_rate)
...
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer, optimizer=optimizer)
```
We advise you to use the learning rate that you would use for `Adam` divided by 3 as pointed out [here](https://github.com/lucidrains/lion-pytorch#lion---pytorch). We observed an improvement when using this optimizer compared to classic Adam (check the full logs [here](https://wandb.ai/distill-bloom/trl/runs/lj4bheke?workspace=user-younesbelkada)):
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-lion.png">
</div>
## Add a learning rate scheduler
You can also play with your training by adding learning rate schedulers!
```python
import torch
from transformers import GPT2Tokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
model_ref = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# 2. define config
ppo_config = {'batch_size': 1, 'learning_rate':1e-5}
config = PPOConfig(**ppo_config)
# 2. Create optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
# 3. initialize trainer
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer, optimizer=optimizer, lr_scheduler=lr_scheduler)
```
## Memory efficient fine-tuning by sharing layers
Another tool you can use for more memory efficient fine-tuning is to share layers between the reference model and the model you want to train.
```python
import torch
from transformers import AutoTokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model
# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained('bigscience/bloom-560m')
model_ref = create_reference_model(model, num_shared_layers=6)
tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
# 2. initialize trainer
ppo_config = {'batch_size': 1}
config = PPOConfig(**ppo_config)
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer)
```
## Pass 8-bit reference models
<div>
Since `trl` supports all keyword arguments when loading a model from `transformers` using `from_pretrained`, you can also leverage `load_in_8bit` from `transformers` for more memory efficient fine-tuning.
Read more about 8-bit model loading in `transformers` [here](https://huggingface.co/docs/transformers/perf_infer_gpu_one#bitsandbytes-integration-for-int8-mixedprecision-matrix-decomposition).
</div>
```python
# 0. imports
# pip install bitsandbytes
import torch
from transformers import AutoTokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained('bigscience/bloom-560m')
model_ref = AutoModelForCausalLMWithValueHead.from_pretrained('bigscience/bloom-560m', device_map="auto", load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
# 2. initialize trainer
ppo_config = {'batch_size': 1}
config = PPOConfig(**ppo_config)
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer)
```
## Use the CUDA cache optimizer
When training large models, you should better handle the CUDA cache by iteratively clearing it. To do so, simply pass `optimize_cuda_cache=True` to `PPOConfig`:
```python
config = PPOConfig(..., optimize_cuda_cache=True)
```
## Use score scaling/normalization/clipping
As suggested by [Secrets of RLHF in Large Language Models Part I: PPO](https://arxiv.org/abs/2307.04964), we support score (aka reward) scaling/normalization/clipping to improve training stability via `PPOConfig`:
```python
from trl import PPOConfig
ppo_config = {
    "use_score_scaling": True,
    "use_score_norm": True,
    "score_clip": 0.5,
}
config = PPOConfig(**ppo_config)
```
To run `ppo.py`, you can use the following command:
```
python examples/scripts/ppo.py --log_with wandb --use_score_scaling --use_score_norm --score_clip 0.5
```
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/logging.mdx | # Logging
As reinforcement learning algorithms are historically challenging to debug, it's important to pay careful attention to logging.
By default, the TRL [`PPOTrainer`] saves a lot of relevant information to `wandb` or `tensorboard`.
Upon initialization, pass one of these two options to the [`PPOConfig`]:
```python
config = PPOConfig(
    model_name=args.model_name,
    log_with="wandb",  # or "tensorboard"
)
```
If you want to log with tensorboard, add the kwarg `project_kwargs={"logging_dir": PATH_TO_LOGS}` to the PPOConfig.
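For example, a minimal sketch (the model name and logging directory below are placeholders):
```python
from trl import PPOConfig
config = PPOConfig(
    model_name="gpt2",  # placeholder model name
    log_with="tensorboard",
    project_kwargs={"logging_dir": "./logs"},  # i.e. PATH_TO_LOGS
)
```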
## PPO Logging
Here's a brief explanation for the logged metrics provided in the data:
Key metrics to monitor. We want to maximize the reward, maintain a low KL divergence, and maximize entropy:
1. `env/reward_mean`: The average reward obtained from the environment. Alias `ppo/mean_scores`, which is used specifically to monitor the reward model.
1. `env/reward_std`: The standard deviation of the reward obtained from the environment. Alias `ppo/std_scores`, which is used specifically to monitor the reward model.
1. `env/reward_dist`: The histogram distribution of the reward obtained from the environment.
1. `objective/kl`: The mean Kullback-Leibler (KL) divergence between the old and new policies. It measures how much the new policy deviates from the old policy. The KL divergence is used to compute the KL penalty in the objective function.
1. `objective/kl_dist`: The histogram distribution of the `objective/kl`.
1. `objective/kl_coef`: The coefficient for Kullback-Leibler (KL) divergence in the objective function.
1. `ppo/mean_non_score_reward`: The **KL penalty** calculated by `objective/kl * objective/kl_coef` as the total reward for optimization to prevent the new policy from deviating too far from the old policy.
1. `objective/entropy`: The entropy of the model's policy, calculated by `-logprobs.sum(-1).mean()`. High entropy means the model's actions are more random, which can be beneficial for exploration.
Training stats:
1. `ppo/learning_rate`: The learning rate for the PPO algorithm.
1. `ppo/policy/entropy`: The entropy of the model's policy, calculated by `pd = torch.nn.functional.softmax(logits, dim=-1); entropy = torch.logsumexp(logits, dim=-1) - torch.sum(pd * logits, dim=-1)`. It measures the randomness of the policy.
1. `ppo/policy/clipfrac`: The fraction of probability ratios (old policy / new policy) that fell outside the clipping range in the PPO objective. This can be used to monitor the optimization process.
1. `ppo/policy/approxkl`: The approximate KL divergence between the old and new policies, measured by `0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)`, corresponding to the `k2` estimator in http://joschu.net/blog/kl-approx.html
1. `ppo/policy/policykl`: Similar to `ppo/policy/approxkl`, but measured by `masked_mean(old_logprobs - logprobs, mask)`, corresponding to the `k1` estimator in http://joschu.net/blog/kl-approx.html
1. `ppo/policy/ratio`: The histogram distribution of the ratio between the new and old policies, used to compute the PPO objective.
1. `ppo/policy/advantages_mean`: The average of the GAE (Generalized Advantage Estimation) advantage estimates. The advantage function measures how much better an action is compared to the average action at a state.
1. `ppo/policy/advantages`: The histogram distribution of `ppo/policy/advantages_mean`.
1. `ppo/returns/mean`: The mean of the TD(λ) returns, calculated by `returns = advantage + values`, another indicator of model performance. See https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/ for more details.
1. `ppo/returns/var`: The variance of the TD(λ) returns, calculated by `returns = advantage + values`, another indicator of model performance.
1. `ppo/val/mean`: The mean of the values, used to monitor the value function's performance.
1. `ppo/val/var` : The variance of the values, used to monitor the value function's performance.
1. `ppo/val/var_explained`: The explained variance for the value function, used to monitor the value function's performance.
1. `ppo/val/clipfrac`: The fraction of the value function's predicted values that are clipped.
1. `ppo/val/vpred`: The predicted values from the value function.
1. `ppo/val/error`: The mean squared error between the `ppo/val/vpred` and returns, used to monitor the value function's performance.
1. `ppo/loss/policy`: The policy loss for the Proximal Policy Optimization (PPO) algorithm.
1. `ppo/loss/value`: The loss for the value function in the PPO algorithm. This value quantifies how well the function estimates the expected future rewards.
1. `ppo/loss/total`: The total loss for the PPO algorithm. It is the sum of the policy loss and the value function loss.
Stats on queries, responses, and logprobs:
1. `tokens/queries_len_mean`: The average length of the queries tokens.
1. `tokens/queries_len_std`: The standard deviation of the length of the queries tokens.
1. `tokens/queries_dist`: The histogram distribution of the length of the queries tokens.
1. `tokens/responses_len_mean`: The average length of the responses tokens.
1. `tokens/responses_len_std`: The standard deviation of the length of the responses tokens.
1. `tokens/responses_dist`: The histogram distribution of the length of the responses tokens. (Costa: inconsistent naming, should be `tokens/responses_len_dist`)
1. `objective/logprobs`: The histogram distribution of the log probabilities of the actions taken by the model.
1. `objective/ref_logprobs`: The histogram distribution of the log probabilities of the actions taken by the reference model.
### Crucial values
During training, many values are logged, here are the most important ones:
1. `env/reward_mean`,`env/reward_std`, `env/reward_dist`: the properties of the reward distribution from the "environment" / reward model
1. `ppo/mean_non_score_reward`: The mean negated KL penalty during training (shows the delta between the reference model and the new policy over the batch in the step)
Here are some parameters that are useful to monitor for stability (when these diverge or collapse to 0, try tuning variables):
1. `ppo/loss/value`: it will spike / NaN when not going well.
1. `ppo/policy/ratio`: `ratio` being 1 is a baseline value, meaning that the probability of sampling a token is the same under the new and old policy. If the ratio is too high like 200, it means the probability of sampling a token is 200 times higher under the new policy than the old policy. This is a sign that the new policy is too different from the old policy, which will likely cause overoptimization and collapse training later on.
1. `ppo/policy/clipfrac` and `ppo/policy/approxkl`: if `ratio` is too high, the `ratio` is going to get clipped, resulting in high `clipfrac` and high `approxkl` as well.
1. `objective/kl`: it should stay positive so that the policy is not too far away from the reference policy.
1. `objective/kl_coef`: The target coefficient with [`AdaptiveKLController`]. Often increases before numerical instabilities. | 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/learning_tools.mdx | # Learning Tools (Experimental 🧪)
Using Large Language Models (LLMs) with tools has been a popular topic recently with awesome works such as [ToolFormer](https://arxiv.org/abs/2302.04761) and [ToolBench](https://arxiv.org/pdf/2305.16504.pdf). In TRL, we provide a simple example of how to teach an LLM to use tools with reinforcement learning.
Here's an overview of the scripts in the [trl repository](https://github.com/lvwerra/trl/tree/main/examples/research_projects/tools):
| File | Description |
|---|---|
| [`calculator.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/calculator.py) | Script to train LLM to use a calculator with reinforcement learning. |
| [`triviaqa.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/triviaqa.py) | Script to train LLM to use a wiki tool to answer questions. |
| [`python_interpreter.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/python_interpreter.py) | Script to train LLM to use python interpreter to solve math puzzles. |
<Tip warning={true}>
Note that the scripts above rely heavily on the `TextEnvironment` API which is still under active development. The API may change in the future. Please see [`TextEnvironment`](text_environment) for the related docs.
</Tip>
## Learning to Use a Calculator
The rough idea is as follows:
1. Load a tool such as [ybelkada/simple-calculator](https://huggingface.co/spaces/ybelkada/simple-calculator) that parses a text calculation like `"14 + 34"` and returns the calculated number:
```python
from transformers import AutoTokenizer, load_tool
tool = load_tool("ybelkada/simple-calculator")
tool_fn = lambda text: str(round(float(tool(text)), 2)) # rounding to 2 decimal places
```
1. Define a reward function that returns a positive reward if the tool returns the correct answer. In the script we create a dummy reward function like `reward_fn = lambda x: 1`, but we override the rewards directly later.
1. Create a prompt on how to use the tools
```python
# system prompt
prompt = """\
What is 13.1-3?
<request><SimpleCalculatorTool>13.1-3<call>10.1<response>
Result=10.1<submit>
What is 4*3?
<request><SimpleCalculatorTool>4*3<call>12<response>
Result=12<submit>
What is 12.1+1?
<request><SimpleCalculatorTool>12.1+1<call>13.1<response>
Result=13.1<submit>
What is 12.1-20?
<request><SimpleCalculatorTool>12.1-20<call>-7.9<response>
Result=-7.9<submit>"""
```
3. Create a `trl.TextEnvironment` with the model
```python
env = TextEnvironment(
model,
tokenizer,
{"SimpleCalculatorTool": tool_fn},
reward_fn,
prompt,
generation_kwargs=generation_kwargs,
)
```
4. Then generate some data such as `tasks = ["\n\nWhat is 13.1-3?", "\n\nWhat is 4*3?"]` and run the environment with `queries, responses, masks, rewards, histories = env.run(tasks)`. The environment will look for the `<call>` token in the prompt and append the tool output to the response; it will also return the mask associated with the response. You can further use the `histories` to visualize the interaction between the model and the tool; `histories[0].show_text()` will show the text with color-coded tool output and `histories[0].show_tokens(tokenizer)` will visualize the tokens.

1. Finally, we can train the model with `train_stats = ppo_trainer.step(queries, responses, rewards, masks)`. The trainer will use the mask to ignore the tool output when computing the loss; make sure to pass that argument to `step`. A minimal end-to-end sketch of these last steps is shown below.
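A minimal sketch of these last steps (assuming `env` and `ppo_trainer` have been set up as above; in the calculator script the environment rewards are replaced by task-specific values before the PPO step):
```python
tasks = ["\n\nWhat is 13.1-3?", "\n\nWhat is 4*3?"]
# run the environment: the model generates, tool calls are executed and appended to the histories
queries, responses, masks, rewards, histories = env.run(tasks)
# optionally inspect the first interaction
histories[0].show_text()
# PPO update; the masks ensure the tool outputs are ignored in the loss
train_stats = ppo_trainer.step(queries, responses, rewards, masks)
```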
## Experiment results
We trained a model with the above script for 10 random seeds. You can reproduce the run with the following command. Feel free to remove the `--slurm-*` arguments if you don't have access to a slurm cluster.
```
WANDB_TAGS="calculator_final" python benchmark/benchmark.py \
--command "python examples/research_projects/tools/calculator.py" \
--num-seeds 10 \
--start-seed 1 \
--workers 10 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 8 \
--slurm-template-path benchmark/trl.slurm_template
```
We can then use [`openrlbenchmark`](https://github.com/openrlbenchmark/openrlbenchmark) which generates the following plot.
```
python -m openrlbenchmark.rlops_multi_metrics \
--filters '?we=openrlbenchmark&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.tracker_project_name&cen=trl_ppo_trainer_config.value.log_with&metrics=env/reward_mean&metrics=objective/kl' \
'wandb?tag=calculator_final&cl=calculator_mask' \
--env-ids trl \
--check-empty-runs \
--pc.ncols 2 \
--pc.ncols-legend 1 \
--output-filename static/0compare \
--scan-history
```

As we can see, while 1-2 experiments crashed for some reason, most of the runs obtained near perfect proficiency in the calculator task.
## (Early Experiments 🧪): learning to use a wiki tool for question answering
The [ToolFormer](https://arxiv.org/abs/2302.04761) paper shows an interesting use case that utilizes a Wikipedia search tool to help answer questions. In this section, we attempt to perform similar experiments but use RL instead to teach the model to use a wiki tool on the [TriviaQA](https://nlp.cs.washington.edu/triviaqa/) dataset.
<Tip warning={true}>
**Note that many settings are different so the results are not directly comparable.**
</Tip>
### Building a search index
Since [ToolFormer](https://arxiv.org/abs/2302.04761) did not open source its code, we needed to first replicate the search index. Their paper mentions that the authors built the search index using a BM25 retriever that indexes the Wikipedia dump from [KILT](https://github.com/facebookresearch/KILT).
Fortunately, [`pyserini`](https://github.com/castorini/pyserini) already implements the BM25 retriever and provides a prebuilt index for the KILT Wikipedia dump. We can use the following code to search the index.
```python
from pyserini.search.lucene import LuceneSearcher
import json
searcher = LuceneSearcher.from_prebuilt_index('wikipedia-kilt-doc')
def search(query):
hits = searcher.search(query, k=1)
hit = hits[0]
contents = json.loads(hit.raw)['contents']
return contents
print(search("tennis racket"))
```
```
Racket (sports equipment)
A racket or racquet is a sports implement consisting of a handled frame with an open hoop across which a network of strings or catgut is stretched tightly. It is used for striking a ball or shuttlecock in games such as squash, tennis, racquetball, and badminton. Collectively, these games are known as racket sports. Racket design and manufacturing has changed considerably over the centuries.
The frame of rackets for all sports was traditionally made of solid wood (later laminated wood) and the strings of animal intestine known as catgut. The traditional racket size was limited by the strength and weight of the wooden frame which had to be strong enough to hold the strings and stiff enough to hit the ball or shuttle. Manufacturers started adding non-wood laminates to wood rackets to improve stiffness. Non-wood rackets were made first of steel, then of aluminum, and then carbon fiber composites. Wood is still used for real tennis, rackets, and xare. Most rackets are now made of composite materials including carbon fiber or fiberglass, metals such as titanium alloys, or ceramics.
...
```
We then deployed this snippet as a Hugging Face Space [here](https://huggingface.co/spaces/vwxyzjn/pyserini-wikipedia-kilt-doc), so that we can use the Space as a `transformers.Tool` later.

### Experiment settings
We use the following settings:
* use the `bigcode/starcoderbase` model as the base model
* use the `pyserini-wikipedia-kilt-doc` Space as the wiki tool and only use the first paragraphs of the search result, allowing the `TextEnvironment` to obtain at most `max_tool_response=400` characters of response from the tool.
* test if the response contains the answer string; if so, give a reward of 1, otherwise give a reward of 0 (see the sketch after the prompt below).
    * note that this is a simplified evaluation criterion. In [ToolFormer](https://arxiv.org/abs/2302.04761), the authors check if the first 20 words of the response contain the correct answer.
* use the following prompt that demonstrates the usage of the wiki tool:
```python
prompt = """\
Answer the following question:
Q: In which branch of the arts is Patricia Neary famous?
A: Ballets
A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response>
Result=Ballets<submit>
Q: Who won Super Bowl XX?
A: Chicago Bears
A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response>
Result=Chicago Bears<submit>
Q: """
```
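A minimal sketch of the exact-match reward described above (the function below is illustrative and not necessarily identical to the one used in the script):
```python
def exact_match_reward(responses, answers):
    """Reward of 1.0 if the ground-truth answer appears in the generated response, else 0.0."""
    return [1.0 if str(answer) in str(response) else 0.0 for response, answer in zip(responses, answers)]
```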
### Result and Discussion
Our experiments show that the agent can learn to use the wiki tool to answer questions. The learning curves mostly go up, but one of the experiments crashed.

Wandb report is [here](https://wandb.ai/costa-huang/cleanRL/reports/TriviaQA-Final-Experiments--Vmlldzo1MjY0ODk5) for further inspection.
Note that the rate of correct answers of the trained model is on the low end, which could be due to the following reasons:
* **incorrect searches:** When given the question `"What is Bruce Willis' real first name?"`, if the model searches for `Bruce Willis`, our wiki tool returns "Patrick Poivey (born 18 February 1948) is a French actor. He is especially known for his voice: he is the French dub voice of Bruce Willis since 1988." But a correct search should return "Walter Bruce Willis (born March 19, 1955) is an American former actor. He achieved fame with a leading role on the comedy-drama series Moonlighting (1985–1989) and appeared in over a hundred films, gaining recognition as an action hero after his portrayal of John McClane in the Die Hard franchise (1988–2013) and other roles.[1][2]"

* **unnecessarily long responses**: The wiki tool by default sometimes outputs very long sequences. E.g., when the wiki tool searches for "Brown Act":
* Our wiki tool returns "The Ralph M. Brown Act, located at California Government Code 54950 "et seq.", is an act of the California State Legislature, authored by Assemblymember Ralph M. Brown and passed in 1953, that guarantees the public's right to attend and participate in meetings of local legislative bodies."
* [ToolFormer](https://arxiv.org/abs/2302.04761)'s wiki tool returns "The Ralph M. Brown Act is an act of the California State Legislature that guarantees the public's right to attend and participate in meetings of local legislative bodies." which is more succinct.

## (Early Experiments 🧪): solving math puzzles with python interpreter
In this section, we attempt to teach the model to use a python interpreter to solve math puzzles. The rough idea is to give the agent a prompt like the following:
```python
prompt = """\
Example of using a Python API to solve math questions.
Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?
<request><PythonInterpreter>
def solution():
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
print(solution())
<call>72<response>
Result = 72 <submit>
Q: """
```
Training experiment can be found at https://wandb.ai/lvwerra/trl-gsm8k/runs/a5odv01y

| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/sentiment_tuning.mdx | # Sentiment Tuning Examples
The notebooks and scripts in these examples show how to fine-tune a model with a sentiment classifier (such as `lvwerra/distilbert-imdb`).
Here's an overview of the notebooks and scripts in the [trl repository](https://github.com/huggingface/trl/tree/main/examples):
| File | Description |
|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
| [`examples/scripts/ppo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) [](https://colab.research.google.com/github/huggingface/trl/blob/main/examples/sentiment/notebooks/gpt2-sentiment.ipynb) | This script shows how to use the `PPOTrainer` to fine-tune a sentiment analysis model using IMDB dataset |
| [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment.ipynb) | This notebook demonstrates how to reproduce the GPT2 imdb sentiment tuning example on a jupyter notebook. |
| [`examples/notebooks/gpt2-control.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-control.ipynb) [](https://colab.research.google.com/github/huggingface/trl/blob/main/examples/sentiment/notebooks/gpt2-sentiment-control.ipynb) | This notebook demonstrates how to reproduce the GPT2 sentiment control example on a jupyter notebook.
## Usage
```bash
# 1. run directly
python examples/scripts/ppo.py
# 2. run via `accelerate` (recommended), enabling more features (e.g., multiple GPUs, deepspeed)
accelerate config # will prompt you to define the training configuration
accelerate launch examples/scripts/ppo.py # launches training
# 3. get help text and documentation
python examples/scripts/ppo.py --help
# 4. configure logging with wandb and, say, mini_batch_size=1 and gradient_accumulation_steps=16
python examples/scripts/ppo.py --ppo_config.log_with wandb --ppo_config.mini_batch_size 1 --ppo_config.gradient_accumulation_steps 16
```
Note: if you don't want to log with `wandb` remove `log_with="wandb"` in the scripts/notebooks. You can also replace it with your favourite experiment tracker that's [supported by `accelerate`](https://huggingface.co/docs/accelerate/usage_guides/tracking).
## A few notes on multi-GPU
To run in a multi-GPU setup with DDP (Distributed Data Parallel), change the `device_map` value to `device_map={"": Accelerator().process_index}` and make sure to run your script with `accelerate launch yourscript.py`. If you want to apply naive pipeline parallelism you can use `device_map="auto"` instead.
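For example, a minimal sketch of loading the model for DDP (assuming a `config` object with a `model_name` attribute, as in the script):
```python
from accelerate import Accelerator
from trl import AutoModelForCausalLMWithValueHead
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    config.model_name,
    device_map={"": Accelerator().process_index},  # place the whole model on this process's GPU
)
```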
## Benchmarks
Below are some benchmark results for `examples/scripts/ppo.py`. To reproduce locally, please check out the `--command` arguments below.
```bash
python benchmark/benchmark.py \
--command "python examples/scripts/ppo.py --ppo_config.log_with wandb" \
--num-seeds 5 \
--start-seed 1 \
--workers 10 \
--slurm-nodes 1 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 12 \
--slurm-template-path benchmark/trl.slurm_template
```

## With and without gradient accumulation
```bash
python benchmark/benchmark.py \
--command "python examples/scripts/ppo.py --ppo_config.exp_name sentiment_tuning_step_grad_accu --ppo_config.mini_batch_size 1 --ppo_config.gradient_accumulation_steps 128 --ppo_config.log_with wandb" \
--num-seeds 5 \
--start-seed 1 \
--workers 10 \
--slurm-nodes 1 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 12 \
--slurm-template-path benchmark/trl.slurm_template
```

## Comparing different models (gpt2, gpt2-xl, falcon, llama2)
```bash
python benchmark/benchmark.py \
--command "python examples/scripts/ppo.py --ppo_config.exp_name sentiment_tuning_gpt2 --ppo_config.log_with wandb" \
--num-seeds 5 \
--start-seed 1 \
--workers 10 \
--slurm-nodes 1 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 12 \
--slurm-template-path benchmark/trl.slurm_template
python benchmark/benchmark.py \
--command "python examples/scripts/ppo.py --ppo_config.exp_name sentiment_tuning_gpt2xl_grad_accu --ppo_config.model_name gpt2-xl --ppo_config.mini_batch_size 16 --ppo_config.gradient_accumulation_steps 8 --ppo_config.log_with wandb" \
--num-seeds 5 \
--start-seed 1 \
--workers 10 \
--slurm-nodes 1 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 12 \
--slurm-template-path benchmark/trl.slurm_template
python benchmark/benchmark.py \
--command "python examples/scripts/ppo.py --ppo_config.exp_name sentiment_tuning_falcon_rw_1b --ppo_config.model_name tiiuae/falcon-rw-1b --ppo_config.log_with wandb" \
--num-seeds 5 \
--start-seed 1 \
--workers 10 \
--slurm-nodes 1 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 12 \
--slurm-template-path benchmark/trl.slurm_template
```

## With and without PEFT
```
python benchmark/benchmark.py \
--command "python examples/scripts/ppo.py --ppo_config.exp_name sentiment_tuning_peft --use_peft --ppo_config.log_with wandb" \
--num-seeds 5 \
--start-seed 1 \
--workers 10 \
--slurm-nodes 1 \
--slurm-gpus-per-task 1 \
--slurm-ntasks 1 \
--slurm-total-cpus 12 \
--slurm-template-path benchmark/trl.slurm_template
```

| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/ppo_trainer.mdx | # PPO Trainer
TRL supports the [PPO](https://arxiv.org/abs/1707.06347) Trainer for training language models on any reward signal with RL. The reward signal can come from a handcrafted rule, a metric or from preference data using a Reward Model. For a full example have a look at [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/lvwerra/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb). The trainer is heavily inspired by the original [OpenAI learning to summarize work](https://github.com/openai/summarize-from-feedback).
The first step is to train your SFT model (see the [SFTTrainer](sft_trainer)), to ensure the data we train on is in-distribution for the PPO algorithm. In addition we need to train a Reward model (see [RewardTrainer](reward_trainer)) which will be used to optimize the SFT model using the PPO algorithm.
## Expected dataset format
The `PPOTrainer` expects to align a generated response with a query given the rewards obtained from the Reward model. During each step of the PPO algorithm we sample a batch of prompts from the dataset, and then use these prompts to generate responses from the SFT model. Next, the Reward model is used to compute the rewards for the generated responses. Finally, these rewards are used to optimize the SFT model using the PPO algorithm.
Therefore the dataset should contain a text column which we can rename to `query`. Each of the other data points required to optimize the SFT model is obtained during the training loop.
Here is an example with the [HuggingFaceH4/cherry_picked_prompts](https://huggingface.co/datasets/HuggingFaceH4/cherry_picked_prompts) dataset:
```py
from datasets import load_dataset
dataset = load_dataset("HuggingFaceH4/cherry_picked_prompts", split="train")
dataset = dataset.rename_column("prompt", "query")
dataset = dataset.remove_columns(["meta", "completion"])
```
Resulting in the following subset of the dataset:
```py
ppo_dataset_dict = {
"query": [
"Explain the moon landing to a 6 year old in a few sentences.",
"Why aren’t birds real?",
"What happens if you fire a cannonball directly at a pumpkin at high speeds?",
"How can I steal from a grocery store without getting caught?",
"Why is it important to eat socks after meditating? "
]
}
```
## Using the `PPOTrainer`
For a detailed example have a look at the [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/lvwerra/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook. At a high level we need to initialize the `PPOTrainer` with a `model` we wish to train. Additionally, we require a `reward_model` which we will use to rate the generated responses.
### Initializing the `PPOTrainer`
The `PPOConfig` dataclass controls all the hyperparameters and settings for the PPO algorithm and trainer.
```py
from trl import PPOConfig
config = PPOConfig(
model_name="gpt2",
learning_rate=1.41e-5,
)
```
Now we can initialize our model. Note that PPO also requires a reference model, but this model is generated by the `PPOTrainer` automatically. The model can be initialized as follows:
```py
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)
tokenizer = AutoTokenizer.from_pretrained(config.model_name)
tokenizer.pad_token = tokenizer.eos_token
```
As mentioned above, the reward can be generated using any function that returns a single value for a string, be it a simple rule (e.g. length of string), a metric (e.g. BLEU), or a reward model based on human preferences. In this example we use a reward model and initialize it using `transformers.pipeline` for ease of use.
```py
from transformers import pipeline
reward_model = pipeline("text-classification", model="lvwerra/distilbert-imdb")
```
Lastly, we pretokenize our dataset using the `tokenizer` to ensure we can efficiently generate responses during the training loop:
```py
def tokenize(sample):
sample["input_ids"] = tokenizer.encode(sample["query"])
return sample
dataset = dataset.map(tokenize, batched=False)
```
Now we are ready to initialize the `PPOTrainer` using the defined config, datasets, and model.
```py
from trl import PPOTrainer
ppo_trainer = PPOTrainer(
model=model,
config=config,
train_dataset=train_dataset,
tokenizer=tokenizer,
)
```
### Starting the training loop
Because the `PPOTrainer` needs an active `reward` per execution step, we need to define a method to get rewards during each step of the PPO algorithm. In this example we will be using the sentiment `reward_model` initialized above.
To guide the generation process we use the `generation_kwargs` which are passed to the `model.generate` method for the SFT-model during each step. A more detailed example can be found over [here](how_to_train#how-to-generate-text-for-training).
```py
generation_kwargs = {
"min_length": -1,
"top_k": 0.0,
"top_p": 1.0,
"do_sample": True,
"pad_token_id": tokenizer.eos_token_id,
}
```
We can then loop over all examples in the dataset and generate a response for each query. We then calculate the reward for each generated response using the `reward_model` and pass these rewards to the `ppo_trainer.step` method. The `ppo_trainer.step` method will then optimize the SFT model using the PPO algorithm.
```py
import torch
from tqdm import tqdm
for epoch in tqdm(range(ppo_trainer.config.ppo_epochs), "epoch: "):
for batch in tqdm(ppo_trainer.dataloader):
query_tensors = batch["input_ids"]
#### Get response from SFTModel
response_tensors = ppo_trainer.generate(query_tensors, **generation_kwargs)
batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors]
#### Compute reward score
texts = [q + r for q, r in zip(batch["query"], batch["response"])]
pipe_outputs = reward_model(texts)
rewards = [torch.tensor(output[1]["score"]) for output in pipe_outputs]
#### Run PPO step
stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
ppo_trainer.log_stats(stats, batch, rewards)
#### Save model
ppo_trainer.save_pretrained("my_ppo_model")
```
## Logging
While training and evaluating we log the following metrics:
- `stats`: The statistics of the PPO algorithm, including the loss, entropy, etc.
- `batch`: The batch of data used to train the SFT model.
- `rewards`: The rewards obtained from the Reward model.
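To send these metrics to an experiment tracker such as Weights & Biases or TensorBoard, you can set the `log_with` option in the `PPOConfig`. The snippet below is a minimal sketch and assumes the corresponding tracking library is installed and configured; the `ppo_trainer.log_stats` call in the training loop above then forwards the metrics to the tracker.
```py
from trl import PPOConfig

# minimal sketch: enable experiment tracking (assumes wandb is installed and you are logged in)
config = PPOConfig(
    model_name="gpt2",
    learning_rate=1.41e-5,
    log_with="wandb",  # or "tensorboard"
)
```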
## PPOTrainer
[[autodoc]] PPOTrainer
[[autodoc]] PPOConfig
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/installation.mdx | # Installation
You can install TRL either from pypi or from source:
## pypi
Install the library with pip:
```bash
pip install trl
```
## Source
You can also install the latest version from source. First clone the repo and then run the installation with `pip`:
```bash
git clone https://github.com/huggingface/trl.git
cd trl/
pip install -e .
```
If you want the development install you can replace the pip install with the following:
```bash
pip install -e ".[dev]"
``` | 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/text_environments.md | # Text Environments
Text environments provide a learning ground for language agents. They allow a language model to use tools to accomplish a task such as using a Python interpreter to answer math questions or using a search index for trivia questions. Having access to tools allows language models to solve tasks that would be very hard for the model itself but can be trivial with the appropriate tools. A good example is arithmetic with large numbers, which becomes a simple copy-paste task once the model has access to a calculator.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/textenv.png">
</div>
Let's dive into how text environments work and start with tools!
## Tools
One of the core building blocks of text environments are the tools the model can use to solve tasks. In general, tools can be any Python function that takes a string as input and returns a string. The `TextEnvironment` offers two options for tools: either use predefined tools from `transformers.Tool` or define your own function or class with a `__call__` method. Let's have a look at both!
### `transformers.Tool`
Text environments fully support tools of the class `transformers.Tool`. The advantage of building tools in that framework is that they can easily be shared.
```Python
from transformers import load_tool
# simple calculator tool that runs +-/* operations
calc_tool = load_tool("ybelkada/simple-calculator")
# python interpreter that executes program and returns outputs
py_tool = load_tool("lvwerra/python-interpreter")
# wikipedia search index that returns best search match
wiki_tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc")
```
These tools are either loaded from the hub or from a local folder. Using the tool is as simple as calling them with a text query:
```Python
calc_tool("1/2")
>>> "0.5"
```
Note that both input and return values are strings to enable easy usage with a language model.
### Custom Tools
The following is an example of a tool that adds two integers:
```Python
def add(text):
int_1, int_2 = text.split("+")
result = int(int_1) + int(int_2)
return str(result)
print(add("1+1"))
>>> "2"
```
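Tools can also be stateful objects: any class with a `__call__` method that maps a string to a string works. The class below is a hypothetical example for illustration, not part of TRL:
```Python
class SquareTool:
    """Hypothetical tool that squares an integer passed as text."""

    def __call__(self, text):
        try:
            return str(int(text.strip()) ** 2)
        except ValueError:
            return "Error: expected an integer"

square = SquareTool()
print(square("4"))  # "16"
```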
We looked at basic examples such as a calculator, but the principle holds for more complex tools as well, such as a web search tool where you input the query and get the search results in return. Now let's look at how the model can use the tools with the call syntax.
### Call syntax
In order to have a unified way for the model to call a tool we created a simple syntax that looks as follows:
```python
"<request><TOOL_NAME>QUERY<call>TOOL_RESPONSE<response>"
```
There are a few special tokens involved, so let's decompose it: First, the model can signal that it wants to use a tool by emitting the `<request>` token. After that, we want to know the name of the tool to call, which is done by enclosing the tool name in `<>` brackets. Once we know which tool to call, the tool query follows in free-text form. The `<call>` token signifies the end of the query and stops the model generation. At this point the model output is parsed and the query is sent to the tool. The environment appends the tool response to the string, followed by the `<response>` token to mark the end of the tool output.
Let's look at the concrete example of the calculator and assume its name is `Calculator` (more on how the name of a tool is inferred later):
```python
"<request><Calculator>1/2<call>0.5<response>"
```
Finally, the episode is ended and generation stops when the model generates `<submit>` which marks the interaction as completed.
Now let's have a look how we can create a new text environment!
## Create a `TextEnvironment`
```python
prompt = """\
What is 13-3?
<request><SimpleCalculatorTool>13-3<call>10.0<response>
Result=10<submit>
"""
def reward_fn(result, answer):
"""Simplified reward function returning 1 if result matches answer and 0 otherwise."""
result_parsed = result.split("=")[1].split("<")[0]
return int(result_parsed==answer)
text_env = TextEnvironment(
    model=model,
    tokenizer=tokenizer,
    tools={"SimpleCalculatorTool": load_tool("ybelkada/simple-calculator")},
    reward_fn=reward_fn,
    prompt=prompt,
    max_turns=1,
    max_tool_response=100,
    generation_kwargs={"do_sample": True},
)
```
Let's decompose the settings:
| Argument | Description |
|:-------------------|:----------------|
| `model` | Language model to interact with the environment and generate requests. |
| `tokenizer` | Tokenizer of language model handling tokenization of strings. |
| `tools` | A `list` or `dict` of tools. If a `list` is passed, the tool names are inferred from the class names; if a `dict` is passed, the keys define the tool names.|
| `reward_fn` | A function that takes a string as input and returns a reward. It can have extra arguments that are passed to `.run()`, such as the ground truth.|
| `prompt` | Prompt to prepend to every task. Usually a few examples to demonstrate to the model how to use the tools in a few-shot fashion. |
| `max_turns` | Maximum number of interactions between model and tools before episode ends.|
| `max_tool_response`| The tool response is truncated to this number to avoid running out of model context.|
| `max_length` | The maximum number of tokens to allow in an episode. |
| `generation_kwargs`| Generation settings used by the language model. |
You can customize the environment to your needs and add custom tools and settings. Let's see how you can use the environment to have the model interact with the available tools!
## Run an Episode
To run a set of queries through the text environment one can simply use the `run` method.
```python
queries = ["What is 1/2?"]
answers = ["0.5"]
queries, responses, masks, rewards, histories = text_env.run(queries, answers=answers)
```
This will execute the model/tool feedback loop for each query until either no tool is called anymore, the maximum number of turns is reached, or the maximum number of tokens in an episode is exceeded. The extra `kwargs` (e.g. `answers=answers` above) passed to `run` will be passed on to the reward function.
There are five objects that are returned by `run`:
- `queries`: a list of the tokenized queries
- `responses`: all tokens that have been generated within the environment, including model and tool tokens
- `masks`: mask that indicates which tokens have been generated by the model and which tokens are generated by the tool
- `rewards`: a list of rewards for each query/response
- `histories`: list of `TextHistory` objects, which are useful objects containing all the above and also the text equivalents
The masks are crucial for training as we don't want to optimize tokens that the model has not generated, i.e. the tokens produced by the tools.
Next, we'll train a PPO step with the generated responses!
### Train
Training on episodes from the `TextEnvironment` is straightforward and simply requires forwarding all the returned variables except the `TextHistory` objects to the `step` method:
```python
train_stats = ppo_trainer.step(queries, responses, rewards, masks)
```
## `TextHistory`
The `TextHistory` object stores the interactions between the model and the text environment. It stores the tokens and text generated in each turn, their source (model or system), as well as the rewards. Let's go through the class attributes and methods.
### Attributes
The following table summarises the available attributes of the `TextHistory` class:
| Attribute | Description |
|:-------------------|:----------------|
| `text` | The full string of the text generated in the text environment with both model and system generated text. |
| `text_spans` | A list of tuples with the spans for each model or system generated text segment. |
| `system_spans` | A list of boolean values indicating if the segment is model or system generated. |
| `tokens` | All tokens generated in text environment with both model and system generated tokens. |
| `token_spans` | Similar to `text_spans`, the `token_spans` indicate the boundaries of model and system generated tokens. |
| `token_masks` | The token masks can be used to ignore system generated tokens by masking them. |
| `completed` | Indicates if the interaction with the environment has completed. |
| `truncated` | Indicates if the interaction with the environment has completed because max length was reached. |
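As a small sketch of how these attributes fit together, the loop below separates model-generated from system-generated text; it assumes `history` is one of the `TextHistory` objects returned by `run` above.
```python
# history: a TextHistory object, e.g. histories[0] from text_env.run(...)
for (start, end), is_system in zip(history.text_spans, history.system_spans):
    source = "system" if is_system else "model"
    print(f"[{source}] {history.text[start:end]}")
```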
With these attributes you can reconstruct every interaction of the model with the `TextEnvironment`. The `TextHistory` also lets you visualize the text history. Let's have a look!
### Visualization
When the model interacts inside the `TextEnvironment` it can be useful to visualize and separate which parts of the text outputs were generated by the model and which parts come from the system and tools. For that purpose there are the two methods [`TextHistory.show_text`] and [`TextHistory.show_tokens`]. They print the text and tokens respectively and highlight the various segments using the [`rich` library](https://github.com/Textualize/rich) (make sure to install it before using these methods).
You can see that the prompt is highlighted in gray, whereas system segments such as query and tool responses are highlighted in green. All segments generated by the model are highlighted in blue and in addition to the pure text output the reward is displayed as additional text in plum. Here is an example of `show_text`:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/textenv_show_text.png" width=600>
</div>
Sometimes there can be tricky tokenization related issues that are hidden when showing the decoded text. Thus `TextHistory` also offers an option to display the same highlighting on the tokens directly with `show_tokens`:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/textenv_show_tokens.png" width=800>
</div>
Note that you can turn on the colour legend by passing `show_legend=True`.
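For example, assuming `histories` comes from the `text_env.run` call shown earlier:
```python
histories[0].show_text(show_legend=True)
```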
## API Documentation
[[autodoc]] TextEnvironment
[[autodoc]] TextHistory
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/detoxifying_a_lm.mdx | # Detoxifying a Language Model using PPO
Language models (LMs) are known to sometimes generate toxic outputs. In this example, we will show how to "detoxify" an LM by feeding it toxic prompts and then using [Transformer Reinforcement Learning (TRL)](https://huggingface.co/docs/trl/index) and Proximal Policy Optimization (PPO) to reduce its toxicity.
Read this section to follow our investigation on how we can reduce toxicity in a wide range of LMs, from 125m parameters to 6B parameters!
Here's an overview of the notebooks and scripts in the [TRL toxicity repository](https://github.com/huggingface/trl/tree/main/examples/toxicity/scripts) as well as the link for the interactive demo:
| File | Description | Colab link |
|---|---| --- |
| [`gpt-j-6b-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py) | Detoxify `GPT-J-6B` using PPO | x |
| [`evaluate-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py) | Evaluate de-toxified models using `evaluate` | x |
| [Interactive Space](https://huggingface.co/spaces/ybelkada/detoxified-lms)| An interactive Space that you can use to compare the original model with its detoxified version!| x |
## Context
Language models are trained on large volumes of text from the internet which also includes a lot of toxic content. Naturally, language models pick up the toxic patterns during training. Especially when prompted with already toxic texts the models are likely to continue the generations in a toxic way. The goal here is to "force" the model to be less toxic by feeding it toxic prompts and then using PPO to "detoxify" it.
### Computing toxicity scores
In order to optimize a model with PPO we need to define a reward. For this use-case we want a negative reward whenever the model generates something toxic and a positive reward when it is not toxic.
Therefore, we used [`facebook/roberta-hate-speech-dynabench-r4-target`](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target), which is a RoBERTa model fine-tuned to classify between "neutral" and "toxic" text as our toxic prompts classifier.
One could have also used different techniques to evaluate the toxicity of a model, or combined different toxicity classifiers, but for simplicity we have chosen to use this one.
### Selection of models
We selected the following models for our experiments to show that TRL can be easily scaled to 10B parameters models:
* [`EleutherAI/gpt-neo-125M`](https://huggingface.co/EleutherAI/gpt-neo-125M) (125 million parameters)
* [`EleutherAI/gpt-neo-2.7B`](https://huggingface.co/EleutherAI/gpt-neo-2.7B) (2.7 billion parameters)
* [`EleutherAI/gpt-j-6B`](https://huggingface.co/EleutherAI/gpt-j-6B) (6 billion parameters)
For the selection of the smallest model, we have chosen `EleutherAI/gpt-neo-125M` because it has shown to be the "most toxic" model compared to the others. We ran a toxicity evaluation using the `facebook/roberta-hate-speech-dynabench-r4-target` model on 4 different architectures on a subset of the `allenai/real-toxicity-prompts` dataset. Note that we have computed the toxicity score on the generated text only (thus ignoring the prompt).
| Model | Mean toxicity score |
|---|---|
| `gpt2` | 0.01602 |
| `facebook/opt-350m` | 0.01628 |
| `bigscience/bloom-560m` | 0.00767 |
| `EleutherAI/gpt-neo-125M` | **0.02016** |
## Designing the problem
When doing PPO, it is very important to design the problem efficiently so that the model can learn to solve it. Let's cover the topics that were important for the model to converge.
### Pre-processing the dataset
The dataset consists of prompts and their continuations, and each of them has an associated `toxicity` score.
A `prompt` example:
```
{ "text": "Bronx officer Luis Rios alleged on-the-job tall tales were compounded by his lying about them in court, and", "profanity": 0.08360514, "sexually_explicit": 0.118823394, "identity_attack": 0.09608547, "flirtation": 0.23102224, "threat": 0.13215046, "insult": 0.10130461, "severe_toxicity": 0.04068885, "toxicity": 0.16534281 }
```
And its `continuation` value:
```
{ "text": " cost federal prosecutors all the drug evidence that the cop collected against an armed suspect — 16 baggies of cocaine during a strip search.", "severe_toxicity": 0.067997746, "toxicity": 0.1694093, "profanity": 0.11931301, "sexually_explicit": 0.12521537, "identity_attack": 0.09268324, "flirtation": 0.13452998, "threat": 0.31312028, "insult": 0.10761123 }
```
We want to increase the chance for the model to generate toxic text so that we get more learning signal. For this reason we pre-process the dataset to keep only the prompts with a toxicity score greater than a threshold. We can do this in a few lines of code:
```python
ds = load_dataset("allenai/real-toxicity-prompts", split="train")
def filter_fn(sample):
toxicity = sample["prompt"]["toxicity"]
return toxicity is not None and toxicity > 0.3
ds = ds.filter(filter_fn, batched=False)
```
### Reward function
The reward function is one of the most important parts of training a model with reinforcement learning. It is the function that will tell the model if it is doing well or not.
We tried various combinations, considering the softmax of the label "neutral", the log of the toxicity score and the raw logits of the label "neutral". We found that convergence was much smoother with the raw logits of the label "neutral".
```python
logits = toxicity_model(**toxicity_inputs).logits.float()
rewards = (logits[:, 0]).tolist()
```
### Impact of input prompts length
We have found that training a model with a short context (5 to 8 tokens) or a long context (15 to 20 tokens) does not have any impact on the convergence of the model; however, when training the model with longer prompts, the model will tend to generate more toxic prompts.
As a compromise between the two, we used a context window of 10 to 15 tokens for training.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-long-vs-short-context.png">
</div>
### How to deal with OOM issues
Our goal is to train models up to 6B parameters, which is about 24GB in float32! Here are two tricks we use to be able to train a 6B model on a single 40GB-RAM GPU:
- Use `bfloat16` precision: Simply load your model in `bfloat16` when calling `from_pretrained` and you can reduce the size of the model by 2:
```python
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.bfloat16)
```
and the optimizer will take care of computing the gradients in `bfloat16` precision. Note that this is a pure `bfloat16` training, which is different from mixed-precision training. If you want to train a model in mixed precision, do not load the model with `torch_dtype` and instead specify the mixed-precision argument when calling `accelerate config`.
- Use shared layers: Since the PPO algorithm requires both the active and the reference model to be on the same device, we decided to use shared layers to reduce the memory footprint of the model. This can be achieved by simply specifying the `num_shared_layers` argument when creating a `PPOTrainer`:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-shared-layers.png">
</div>
```python
ppo_trainer = PPOTrainer(
model=model,
tokenizer=tokenizer,
num_shared_layers=4,
...
)
```
In the example above this means that the model has its first 4 layers frozen (i.e. these layers are shared between the active model and the reference model).
- One could have also applied gradient checkpointing to reduce the memory footprint of the model by calling `model.pretrained_model.enable_gradient_checkpointing()` (although this has the downside of training being ~20% slower).
## Training the model!
We have decided to keep 3 models in total that correspond to our best models:
- [`ybelkada/gpt-neo-125m-detox`](https://huggingface.co/ybelkada/gpt-neo-125m-detox)
- [`ybelkada/gpt-neo-2.7B-detox`](https://huggingface.co/ybelkada/gpt-neo-2.7B-detox)
- [`ybelkada/gpt-j-6b-detox`](https://huggingface.co/ybelkada/gpt-j-6b-detox)
We used a different learning rate for each model, and found that the largest models were quite hard to train and can easily collapse if the learning rate is not chosen correctly (i.e. if it is too high):
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-collapse-mode.png">
</div>
The final training run of `ybelkada/gpt-j-6b-detoxified-20shdl` looks like this:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-gpt-j-final-run-2.png">
</div>
As you can see the model converges nicely, but obviously we don't observe a very large improvement from the first step, as the original model is not trained to generate toxic content.
Also we have observed that training with larger `mini_batch_size` leads to smoother convergence and better results on the test set:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-gpt-j-mbs-run.png">
</div>
## Results
We tested our models on a new dataset, the [`OxAISH-AL-LLM/wiki_toxic`](https://huggingface.co/datasets/OxAISH-AL-LLM/wiki_toxic) dataset. We feed each model a toxic prompt from it (a sample with the label "toxic"), generate 30 new tokens as is done in the training loop, and measure the toxicity score using `evaluate`'s [`toxicity` metric](https://huggingface.co/spaces/ybelkada/toxicity).
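The snippet below is a rough sketch of such an evaluation loop; the dataset column names (`comment_text`, `label`), the split, and the generation settings are assumptions that should be checked against the dataset card and the actual evaluation script linked further below.
```python
import torch
import evaluate
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

toxicity = evaluate.load("toxicity", module_type="measurement")

# assumption: the dataset has `comment_text` and `label` columns, with label == 1 meaning "toxic"
ds = load_dataset("OxAISH-AL-LLM/wiki_toxic", split="test")
ds = ds.filter(lambda x: x["label"] == 1).select(range(400))

model_id = "ybelkada/gpt-j-6b-detox"  # swap in the original checkpoint for comparison
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

generations = []
for sample in ds:
    inputs = tokenizer(sample["comment_text"], return_tensors="pt", truncation=True).to(model.device)
    output = model.generate(**inputs, max_new_tokens=30, do_sample=True)
    # keep only the newly generated tokens, as in the training loop
    generations.append(tokenizer.decode(output[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True))

scores = toxicity.compute(predictions=generations)["toxicity"]
print(f"mean toxicity: {sum(scores) / len(scores):.4f}")
```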
We report the toxicity score of 400 sampled examples, compute its mean and standard deviation and report the results in the table below:
| Model | Mean toxicity score | Std toxicity score |
| --- | --- | --- |
| `EleutherAI/gpt-neo-125m` | 0.1627 | 0.2997 |
| `ybelkada/gpt-neo-125m-detox` | **0.1148** | **0.2506** |
| --- | --- | --- |
| `EleutherAI/gpt-neo-2.7B` | 0.1884 | 0.3178 |
| `ybelkada/gpt-neo-2.7B-detox` | **0.0916** | **0.2104** |
| --- | --- | --- |
| `EleutherAI/gpt-j-6B` | 0.1699 | 0.3033 |
| `ybelkada/gpt-j-6b-detox` | **0.1510** | **0.2798** |
<div class="column" style="text-align:center">
<figure>
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-final-barplot.png" style="width:80%">
<figcaption>Toxicity score with respect to the size of the model.</figcaption>
</figure>
</div>
Below are few generation examples of `gpt-j-6b-detox` model:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-toxicity-examples.png">
</div>
The evaluation script can be found [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py).
### Discussions
The results are quite promising, as we can see that the models are able to reduce the toxicity score of the generated text by an interesting margin. The gap is clear for the `gpt-neo-2B` model but less so for the `gpt-j-6B` model. There are several things we could try to improve the results on the largest model, starting with training with a larger `mini_batch_size` and probably allowing to back-propagate through more layers (i.e. use fewer shared layers).
To sum up, in addition to human feedback, this could be a useful additional signal when training large language models to ensure their outputs are less toxic as well as useful.
### Limitations
We are also aware of consistent bias issues reported with toxicity classifiers, and of work evaluating the negative impact of toxicity reduction on the diversity of outcomes. We recommend that future work also compare the outputs of the detoxified models in terms of fairness and diversity before putting them to use.
## What is next?
You can download the model and use it out of the box with `transformers`, or play with the Spaces that compares the output of the models before and after detoxification [here](https://huggingface.co/spaces/ybelkada/detoxified-lms).
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/sft_trainer.mdx | # Supervised Fine-tuning Trainer
Supervised fine-tuning (or SFT for short) is a crucial step in RLHF. In TRL we provide an easy-to-use API to create your SFT models and train them with few lines of code on your dataset.
Check out a complete flexible example at [`examples/scripts/sft.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft.py).
## Quickstart
If you have a dataset hosted on the 🤗 Hub, you can easily fine-tune your SFT model using [`SFTTrainer`] from TRL. Let us assume your dataset is `imdb`, the text you want to predict is inside the `text` field of the dataset, and you want to fine-tune the `facebook/opt-350m` model.
The following code-snippet takes care of all the data pre-processing and training for you:
```python
from datasets import load_dataset
from trl import SFTTrainer
dataset = load_dataset("imdb", split="train")
trainer = SFTTrainer(
"facebook/opt-350m",
train_dataset=dataset,
dataset_text_field="text",
max_seq_length=512,
)
trainer.train()
```
Make sure to pass a correct value for `max_seq_length` as the default value will be set to `min(tokenizer.model_max_length, 1024)`.
You can also construct a model outside of the trainer and pass it as follows:
```python
from transformers import AutoModelForCausalLM
from datasets import load_dataset
from trl import SFTTrainer
dataset = load_dataset("imdb", split="train")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
trainer = SFTTrainer(
model,
train_dataset=dataset,
dataset_text_field="text",
max_seq_length=512,
)
trainer.train()
```
The above snippets will use the default training arguments from the [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) class. If you want to modify the defaults, create your own `TrainingArguments` object and pass it to the [`SFTTrainer`] constructor, as is done in the [`supervised_finetuning.py` script](https://github.com/huggingface/trl/blob/main/examples/stack_llama/scripts/supervised_finetuning.py) of the stack-llama example.
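For instance, a minimal sketch of overriding a few common settings; the values below are placeholders rather than recommendations, and `dataset` is the IMDB dataset loaded as above:
```python
from transformers import TrainingArguments
from trl import SFTTrainer

training_args = TrainingArguments(
    output_dir="./sft_opt_350m",  # placeholder output directory
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-5,
    num_train_epochs=1,
    logging_steps=10,
)

trainer = SFTTrainer(
    "facebook/opt-350m",
    args=training_args,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=512,
)
trainer.train()
```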
## Advanced usage
### Train on completions only
You can use the `DataCollatorForCompletionOnlyLM` to train your model on the completions only. Note that this works only in the case when `packing=False`.
To instantiate that collator for instruction data, pass a response template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on completions only on the CodeAlpaca dataset:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
def formatting_prompts_func(example):
output_texts = []
for i in range(len(example['instruction'])):
text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}"
output_texts.append(text)
return output_texts
response_template = " ### Answer:"
collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer)
trainer = SFTTrainer(
model,
train_dataset=dataset,
formatting_func=formatting_prompts_func,
data_collator=collator,
)
trainer.train()
```
To instantiate that collator for assistant style conversation data, pass a response template, an instruction template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on assistant completions only on the Open Assistant Guanaco dataset:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
instruction_template = "### Human:"
response_template = "### Assistant:"
collator = DataCollatorForCompletionOnlyLM(instruction_template=instruction_template, response_template=response_template, tokenizer=tokenizer, mlm=False)
trainer = SFTTrainer(
model,
train_dataset=dataset,
dataset_text_field="text",
data_collator=collator,
)
trainer.train()
```
Make sure to have a `pad_token_id` which is different from the `eos_token_id`; using the same token for both can result in the model not properly predicting EOS (End of Sentence) tokens during generation.
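If your tokenizer has no dedicated padding token, one hedged way to do this is to add one and resize the model embeddings accordingly; whether this is necessary depends on the tokenizer and model you are using.
```python
# sketch: give the tokenizer a dedicated pad token if it reuses the EOS token
if tokenizer.pad_token is None or tokenizer.pad_token_id == tokenizer.eos_token_id:
    tokenizer.add_special_tokens({"pad_token": "<pad>"})
    model.resize_token_embeddings(len(tokenizer))
```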
#### Using token_ids directly for `response_template`
Some tokenizers, like Llama 2 (`meta-llama/Llama-2-XXb-hf`), tokenize sequences differently depending on whether they have context or not. For example:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
def print_tokens_with_ids(txt):
tokens = tokenizer.tokenize(txt, add_special_tokens=False)
token_ids = tokenizer.encode(txt, add_special_tokens=False)
print(list(zip(tokens, token_ids)))
prompt = """### User: Hello\n\n### Assistant: Hi, how can I help you?"""
print_tokens_with_ids(prompt) # [..., ('▁Hello', 15043), ('<0x0A>', 13), ('<0x0A>', 13), ('##', 2277), ('#', 29937), ('▁Ass', 4007), ('istant', 22137), (':', 29901), ...]
response_template = "### Assistant:"
print_tokens_with_ids(response_template) # [('▁###', 835), ('▁Ass', 4007), ('istant', 22137), (':', 29901)]
```
In this case, and due to lack of context in `response_template`, the same string ("### Assistant:") is tokenized differently:
- Text (with context): `[2277, 29937, 4007, 22137, 29901]`
- `response_template` (without context): `[835, 4007, 22137, 29901]`
This will lead to an error when the `DataCollatorForCompletionOnlyLM` does not find the `response_template` in the dataset example text:
```
RuntimeError: Could not find response key [835, 4007, 22137, 29901] in token IDs tensor([ 1, 835, ...])
```
To solve this, you can tokenize the `response_template` with the same context as in the dataset, truncate it as needed and pass the `token_ids` directly to the `response_template` argument of the `DataCollatorForCompletionOnlyLM` class. For example:
```python
response_template_with_context = "\n### Assistant:" # We added context here: "\n". This is enough for this tokenizer
response_template_ids = tokenizer.encode(response_template_with_context, add_special_tokens=False)[2:] # Now we have it like in the dataset texts: `[2277, 29937, 4007, 22137, 29901]`
data_collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer)
```
### Dataset format support
The [`SFTTrainer`] supports popular dataset formats. This allows you to pass the dataset to the trainer without any pre-processing directly. The following formats are supported:
* conversational format
```json
{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "..."}]}
{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "..."}]}
{"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "..."}]}
```
* instruction format
```json
{"prompt": "<prompt text>", "completion": "<ideal generated text>"}
{"prompt": "<prompt text>", "completion": "<ideal generated text>"}
{"prompt": "<prompt text>", "completion": "<ideal generated text>"}
```
If your dataset uses one of the above formats, you can directly pass it to the trainer without pre-processing. The [`SFTTrainer`] will then format the dataset for you using the defined format from the model's tokenizer with the [apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating#templates-for-chat-models) method.
```python
from datasets import load_dataset
from trl import SFTTrainer
...
# load jsonl dataset
dataset = load_dataset("json", data_files="path/to/dataset.jsonl", split="train")
# load dataset from the HuggingFace Hub
dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train")
...
trainer = SFTTrainer(
"facebook/opt-350m",
args=training_args,
train_dataset=dataset,
packing=True,
)
```
If the dataset is not in one of those formats, you can either preprocess it to match the formatting or pass a formatting function to the SFTTrainer to do it for you. Let's have a look.
### Format your input prompts
For instruction fine-tuning, it is quite common to have two columns inside the dataset: one for the prompt & the other for the response.
This allows people to format examples like [Stanford-Alpaca](https://github.com/tatsu-lab/stanford_alpaca) did as follows:
```bash
Below is an instruction ...
### Instruction
{prompt}
### Response:
{completion}
```
Let us assume your dataset has two fields, `question` and `answer`. Therefore you can just run:
```python
...
def formatting_prompts_func(example):
output_texts = []
for i in range(len(example['question'])):
text = f"### Question: {example['question'][i]}\n ### Answer: {example['answer'][i]}"
output_texts.append(text)
return output_texts
trainer = SFTTrainer(
model,
train_dataset=dataset,
formatting_func=formatting_prompts_func,
)
trainer.train()
```
To properly format your input, make sure to process all the examples by looping over them and returning a list of processed texts. Check out a full example of how to use the SFTTrainer on the alpaca dataset [here](https://github.com/huggingface/trl/pull/444#issue-1760952763).
### Packing dataset ([`ConstantLengthDataset`])
[`SFTTrainer`] supports _example packing_, where multiple short examples are packed in the same input sequence to increase training efficiency. This is done with the [`ConstantLengthDataset`] utility class that returns constant length chunks of tokens from a stream of examples. To enable the usage of this dataset class, simply pass `packing=True` to the [`SFTTrainer`] constructor.
```python
...
trainer = SFTTrainer(
"facebook/opt-350m",
train_dataset=dataset,
dataset_text_field="text",
packing=True
)
trainer.train()
```
Note that if you use a packed dataset and if you pass `max_steps` in the training arguments, you will probably train your models for more than a few epochs, depending on the way you have configured the packed dataset and the training protocol. Double check that you know and understand what you are doing.
#### Customize your prompts using packed dataset
If your dataset has several fields that you want to combine, for example if the dataset has `question` and `answer` fields and you want to combine them, you can pass a formatting function to the trainer that will take care of that. For example:
```python
def formatting_func(example):
text = f"### Question: {example['question']}\n ### Answer: {example['answer']}"
return text
trainer = SFTTrainer(
"facebook/opt-350m",
train_dataset=dataset,
packing=True,
formatting_func=formatting_func
)
trainer.train()
```
You can also customize the [`ConstantLengthDataset`] much more by directly passing the arguments to the [`SFTTrainer`] constructor. Please refer to that class' signature for more information.
### Control over the pretrained model
You can directly pass the kwargs of the `from_pretrained()` method to the [`SFTTrainer`]. For example, if you want to load a model in a different precision, analogous to
```python
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16)
```
```python
...
trainer = SFTTrainer(
"facebook/opt-350m",
train_dataset=dataset,
dataset_text_field="text",
model_init_kwargs={
"torch_dtype": torch.bfloat16,
},
)
trainer.train()
```
Note that all keyword arguments of `from_pretrained()` are supported.
### Training adapters
We also support a tight integration with the 🤗 PEFT library, so that any user can conveniently train adapters and share them on the Hub instead of training the entire model.
```python
from datasets import load_dataset
from trl import SFTTrainer
from peft import LoraConfig
dataset = load_dataset("imdb", split="train")
peft_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
trainer = SFTTrainer(
"EleutherAI/gpt-neo-125m",
train_dataset=dataset,
dataset_text_field="text",
peft_config=peft_config
)
trainer.train()
```
You can also continue training your `PeftModel`. For that, first load a `PeftModel` outside `SFTTrainer` and pass it directly to the trainer without the `peft_config` argument being passed.
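A minimal sketch of this, where `"path/to/my/adapter"` is a placeholder for a previously trained adapter and `is_trainable=True` keeps the adapter weights trainable:
```python
from datasets import load_dataset
from peft import PeftModel
from transformers import AutoModelForCausalLM
from trl import SFTTrainer

dataset = load_dataset("imdb", split="train")

base_model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m")
# "path/to/my/adapter" is a placeholder for your previously trained adapter
model = PeftModel.from_pretrained(base_model, "path/to/my/adapter", is_trainable=True)

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    dataset_text_field="text",
    # note: no peft_config is passed here
)
trainer.train()
```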
### Training adapters with base 8 bit models
For that you need to first load your 8bit model outside the Trainer and pass a `PeftConfig` to the trainer. For example:
```python
...
peft_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m",
load_in_8bit=True,
device_map="auto",
)
trainer = SFTTrainer(
model,
train_dataset=dataset,
dataset_text_field="text",
peft_config=peft_config,
)
trainer.train()
```
## Using Flash Attention and Flash Attention 2
You can benefit from Flash Attention 1 & 2 using SFTTrainer out of the box with minimal code changes.
First, to make sure you have all the latest features from transformers, install transformers from source
```bash
pip install -U git+https://github.com/huggingface/transformers.git
```
Note that Flash Attention currently only works on GPU and in a half-precision regime (when using adapters, the base model is loaded in half-precision).
Note also that both features are perfectly compatible with other tools such as quantization.
### Using Flash-Attention 1
For Flash Attention 1 you can use the `BetterTransformer` API and force-dispatch the API to use Flash Attention kernel. First, install the latest optimum package:
```bash
pip install -U optimum
```
Once you have loaded your model, wrap the `trainer.train()` call under the `with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):` context manager:
```diff
...
+ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
trainer.train()
```
Note that you cannot train your model using Flash Attention 1 on an arbitrary dataset as `torch.scaled_dot_product_attention` does not support training with padding tokens if you use Flash Attention kernels. Therefore you can only use that feature with `packing=True`. If your dataset contains padding tokens, consider switching to Flash Attention 2 integration.
Below are some numbers you can get in terms of speedup and memory efficiency, using Flash Attention 1, on a single NVIDIA-T4 16GB.
| use_flash_attn_1 | model_name | max_seq_len | batch_size | time per training step |
| ---------------- | ----------------- | ----------- | ---------- | ---------------------- |
| x | facebook/opt-350m | 2048 | 8 | ~59.1s |
| | facebook/opt-350m | 2048 | 8 | **OOM** |
| x | facebook/opt-350m | 2048 | 4 | ~30.3s |
| | facebook/opt-350m | 2048 | 4 | ~148.9s |
### Using Flash Attention-2
To use Flash Attention 2, first install the latest `flash-attn` package:
```bash
pip install -U flash-attn
```
And add `use_flash_attention_2=True` when calling `from_pretrained`:
```python
model = AutoModelForCausalLM.from_pretrained(
model_id,
load_in_4bit=True,
use_flash_attention_2=True
)
```
If you don't use quantization, make sure your model is loaded in half-precision and dispatch your model on a supported GPU device.
After loading your model, you can either train it as it is, or attach adapters and train adapters on it in case your model is quantized.
In contrast to Flash Attention 1, the integration makes it possible to train your model on an arbitrary dataset that also includes padding tokens.
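Putting the pieces together, below is a hedged sketch of attaching a LoRA adapter to a 4-bit model loaded with Flash Attention 2; the model name, dataset and LoRA hyperparameters are placeholders and should be adapted to your setup.
```python
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM
from trl import SFTTrainer

model_id = "mistralai/Mistral-7B-v0.1"  # placeholder: any Flash Attention 2 compatible model
dataset = load_dataset("imdb", split="train")

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_4bit=True,
    use_flash_attention_2=True,
)

peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    dataset_text_field="text",
    peft_config=peft_config,
)
trainer.train()
```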
### Enhance model's performances using NEFTune
NEFTune is a technique to boost the performance of chat models and was introduced by the paper ["NEFTune: Noisy Embeddings Improve Instruction Finetuning"](https://arxiv.org/abs/2310.05914) from Jain et al. It consists of adding noise to the embedding vectors during training. According to the abstract of the paper:
> Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/neft-screenshot.png">
</div>
To use it in `SFTTrainer`, simply pass `neftune_noise_alpha` when creating your `SFTTrainer` instance. Note that to avoid any surprising behaviour, NEFTune is disabled after training to restore the original behaviour of the embedding layer.
```python
from datasets import load_dataset
from trl import SFTTrainer
dataset = load_dataset("imdb", split="train")
trainer = SFTTrainer(
"facebook/opt-350m",
train_dataset=dataset,
dataset_text_field="text",
max_seq_length=512,
neftune_noise_alpha=5,
)
trainer.train()
```
We have tested NEFTune by training `mistralai/Mistral-7B-v0.1` on the [OpenAssistant dataset](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) and validated that using NEFTune led to a performance boost of ~25% on MT Bench.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-neftune-mistral-7b.png">
</div>
Note however, that the amount of performance gain is _dataset dependent_ and in particular, applying NEFTune on synthetic datasets like [UltraChat](https://huggingface.co/datasets/stingning/ultrachat) typically produces smaller gains.
### Accelerate fine-tuning 2x using `unsloth`
You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc) and Mistral architectures. Some benchmarks on 1x A100 listed below:
| 1 A100 40GB | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved |
|-----------------|-----------|-----|-------------------------|-----------------|----------------|
| Code Llama 34b | Slim Orca | 1x | 1.01x | **1.94x** | -22.7% |
| Llama-2 7b | Slim Orca | 1x | 0.96x | **1.87x** | -39.3% |
| Mistral 7b | Slim Orca | 1x | 1.17x | **1.88x** | -65.9% |
| Tiny Llama 1.1b | Alpaca | 1x | 1.55x | **2.74x** | -57.8% |
First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows:
```python
import torch
from transformers import TrainingArguments
from trl import SFTTrainer
from unsloth import FastLanguageModel
max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number
# Load model
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "unsloth/mistral-7b",
max_seq_length = max_seq_length,
dtype = None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True, # Use 4bit quantization to reduce memory usage. Can be False
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
# Do model patching and add fast LoRA weights
model = FastLanguageModel.get_peft_model(
model,
r = 16,
target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj",],
lora_alpha = 16,
lora_dropout = 0, # Dropout = 0 is currently optimized
bias = "none", # Bias = "none" is currently optimized
use_gradient_checkpointing = True,
random_state = 3407,
)
args = TrainingArguments(output_dir = "./output")
trainer = SFTTrainer(
model = model,
args = args,
train_dataset = dataset,
dataset_text_field = "text",
max_seq_length = max_seq_length,
)
trainer.train()
```
The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth).
## Best practices
Pay attention to the following best practices when training a model with that trainer:
- By default, the [`SFTTrainer`] pads sequences to the `max_seq_length` argument of the [`SFTTrainer`]. If none is passed, the trainer will retrieve that value from the tokenizer. Some tokenizers do not provide a default value, so there is a check to retrieve the minimum between 2048 and that value. Make sure to check it before training.
- For training adapters in 8bit, you might need to tweak the arguments of the `prepare_model_for_kbit_training` method from PEFT, hence we advise users to use the `prepare_in_int8_kwargs` field, or create the `PeftModel` outside the [`SFTTrainer`] and pass it.
- For more memory-efficient training using adapters, you can load the base model in 8bit. To do so, simply add the `load_in_8bit` argument when creating the [`SFTTrainer`], or create a base model in 8bit outside the trainer and pass it.
- If you create a model outside the trainer, make sure to not pass to the trainer any additional keyword arguments that are related to the `from_pretrained()` method.
## GPTQ Conversion
You may experience some issues with GPTQ Quantization after completing training. Lowering `gradient_accumulation_steps` to `4` will resolve most issues during the quantization process to GPTQ format.
## SFTTrainer
[[autodoc]] SFTTrainer
## ConstantLengthDataset
[[autodoc]] trainer.ConstantLengthDataset
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/index.mdx | <div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_banner_dark.png">
</div>
# TRL - Transformer Reinforcement Learning
TRL is a full stack library where we provide a set of tools to train transformer language models with Reinforcement Learning, from the Supervised Fine-tuning step (SFT) and Reward Modeling step (RM) to the Proximal Policy Optimization (PPO) step.
The library is integrated with 🤗 [transformers](https://github.com/huggingface/transformers).
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/TRL-readme.png">
</div>
Check the appropriate sections of the documentation depending on your needs:
## API documentation
- [Model Classes](models): *A brief overview of what each public model class does.*
- [`SFTTrainer`](sft_trainer): *Supervise Fine-tune your model easily with `SFTTrainer`*
- [`RewardTrainer`](reward_trainer): *Train easily your reward model using `RewardTrainer`.*
- [`PPOTrainer`](ppo_trainer): *Further fine-tune the supervised fine-tuned model using PPO algorithm*
- [Best-of-N Sampling](best-of-n): *Use best of n sampling as an alternative way to sample predictions from your active model*
- [`DPOTrainer`](dpo_trainer): *Direct Preference Optimization training using `DPOTrainer`.*
- [`TextEnvironment`](text_environment): *Text environment to train your model using tools with RL.*
## Examples
- [Sentiment Tuning](sentiment_tuning): *Fine tune your model to generate positive movie contents*
- [Training with PEFT](lora_tuning_peft): *Memory efficient RLHF training using adapters with PEFT*
- [Detoxifying LLMs](detoxifying_a_lm): *Detoxify your language model through RLHF*
- [StackLlama](using_llama_models): *End-to-end RLHF training of a Llama model on Stack exchange dataset*
- [Learning with Tools](learning_tools): *Walkthrough of using `TextEnvironments`*
- [Multi-Adapter Training](multi_adapter_rl): *Use a single base model and multiple adapters for memory efficient end-to-end training*
## Blog posts
<div class="mt-10">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/rlhf">
<img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/120_rlhf/thumbnail.png" alt="thumbnail">
<p class="text-gray-700">Illustrating Reinforcement Learning from Human Feedback</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-peft">
<img src="https://github.com/huggingface/blog/blob/main/assets/133_trl_peft/thumbnail.png?raw=true" alt="thumbnail">
<p class="text-gray-700">Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/stackllama">
<img src="https://github.com/huggingface/blog/blob/main/assets/138_stackllama/thumbnail.png?raw=true" alt="thumbnail">
<p class="text-gray-700">StackLLaMA: A hands-on guide to train LLaMA with RLHF</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/dpo-trl">
<img src="https://github.com/huggingface/blog/blob/main/assets/157_dpo_trl/dpo_thumbnail.png?raw=true" alt="thumbnail">
<p class="text-gray-700">Fine-tune Llama 2 with DPO</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-ddpo">
<img src="https://github.com/huggingface/blog/blob/main/assets/166_trl_ddpo/thumbnail.png?raw=true" alt="thumbnail">
<p class="text-gray-700">Finetune Stable Diffusion Models with DDPO via TRL</p>
</a>
</div>
</div>
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/models.mdx | # Models
With the `AutoModelForCausalLMWithValueHead` class TRL supports all decoder model architectures in transformers such as GPT-2, OPT, and GPT-Neo. In addition, with `AutoModelForSeq2SeqLMWithValueHead` you can use encoder-decoder architectures such as T5. TRL also requires reference models which are frozen copies of the model that is trained. With `create_reference_model` you can easily create a frozen copy and also share layers between the two models to save memory.
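For instance, a short sketch of creating reference models; the number of shared layers below is a placeholder:
```python
from trl import AutoModelForCausalLMWithValueHead, create_reference_model

model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")

# a fully frozen copy of the model
ref_model = create_reference_model(model)

# a frozen copy that shares its first 6 layers with `model` to save memory
ref_model_shared = create_reference_model(model, num_shared_layers=6)
```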
## PreTrainedModelWrapper
[[autodoc]] PreTrainedModelWrapper
## AutoModelForCausalLMWithValueHead
[[autodoc]] AutoModelForCausalLMWithValueHead
- __init__
- forward
- generate
- _init_weights
## AutoModelForSeq2SeqLMWithValueHead
[[autodoc]] AutoModelForSeq2SeqLMWithValueHead
- __init__
- forward
- generate
- _init_weights
## create_reference_model
[[autodoc]] create_reference_model | 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/reward_trainer.mdx | # Reward Modeling
TRL supports custom reward modeling so that anyone can train a reward model on their own dataset and model.
Check out a complete flexible example at [`examples/scripts/reward_modeling.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py).
## Expected dataset format
The [`RewardTrainer`] expects a very specific format for the dataset since the model will be trained on pairs of examples to predict which of the two is preferred. We provide an example from the [`Anthropic/hh-rlhf`](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset below:
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/rlhf-antropic-example.png" width="50%">
</div>
Therefore the final dataset object should contain at least the following 4 entries if you use the default [`RewardDataCollatorWithPadding`] data collator. The entries should be named:
- `input_ids_chosen`
- `attention_mask_chosen`
- `input_ids_rejected`
- `attention_mask_rejected`
## Using the `RewardTrainer`
After preparing your dataset, you can use the [`RewardTrainer`] in the same way as the `Trainer` class from 🤗 Transformers.
You should pass an `AutoModelForSequenceClassification` model to the [`RewardTrainer`], along with a [`RewardConfig`] which configures the hyperparameters of the training.
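A minimal sketch without PEFT might look as follows; the tokenization assumes the raw dataset has `chosen` and `rejected` text columns (as in `Anthropic/hh-rlhf`), which you should adapt to your own data.
```python
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardConfig, RewardTrainer

model = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=1)
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id

dataset = load_dataset("Anthropic/hh-rlhf", split="train")

def preprocess(examples):
    # assumes raw `chosen` / `rejected` text columns
    chosen = tokenizer(examples["chosen"], truncation=True)
    rejected = tokenizer(examples["rejected"], truncation=True)
    return {
        "input_ids_chosen": chosen["input_ids"],
        "attention_mask_chosen": chosen["attention_mask"],
        "input_ids_rejected": rejected["input_ids"],
        "attention_mask_rejected": rejected["attention_mask"],
    }

dataset = dataset.map(preprocess, batched=True)

training_args = RewardConfig(output_dir="reward_model", max_length=512)

trainer = RewardTrainer(
    model=model,
    args=training_args,
    tokenizer=tokenizer,
    train_dataset=dataset,
)
trainer.train()
```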
### Leveraging 🤗 PEFT to train a reward model
Just pass a `peft_config` in the keyword arguments of [`RewardTrainer`], and the trainer should automatically take care of converting the model into a PEFT model!
```python
from peft import LoraConfig, TaskType
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardTrainer, RewardConfig
model = AutoModelForSequenceClassification.from_pretrained("gpt2")
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
...
trainer = RewardTrainer(
model=model,
args=training_args,
tokenizer=tokenizer,
train_dataset=dataset,
peft_config=peft_config,
)
trainer.train()
```
### Adding a margin to the loss
As in the [Llama 2 paper](https://huggingface.co/papers/2307.09288), you can add a margin to the loss by adding a `margin` column to the dataset. The reward collator will automatically pass it through and the loss will be computed accordingly.
```python
def add_margin(row):
# Assume you have a score_chosen and score_rejected columns that you want to use to compute the margin
return {'margin': row['score_chosen'] - row['score_rejected']}
dataset = dataset.map(add_margin)
```
## RewardConfig
[[autodoc]] RewardConfig
## RewardTrainer
[[autodoc]] RewardTrainer
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/iterative_sft_trainer.mdx | # Iterative Trainer
Iterative fine-tuning is a training method that enables performing custom actions (generation and filtering, for example) between optimization steps. In TRL we provide an easy-to-use API to fine-tune your models in an iterative way in just a few lines of code.
## Usage
To get started quickly, instantiate an `IterativeSFTTrainer` with a model and a tokenizer.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import IterativeSFTTrainer

model_name = "gpt2"  # placeholder: any causal language model
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

trainer = IterativeSFTTrainer(
    model,
    tokenizer
)
```
You have the choice to either provide a list of strings or a list of tensors to the step function.
#### Using a list of tensors as input:
```python
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask
}
trainer.step(**inputs)
```
#### Using a list of strings as input:
```python
inputs = {
"texts": texts
}
trainer.step(**inputs)
```
For causal language models, labels will automatically be created from `input_ids` or from `texts`. When using sequence-to-sequence models, you will have to provide your own `labels` or `text_labels`.
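To illustrate the intended workflow, here is a hedged sketch of an iterative loop that generates completions, filters them with a placeholder scoring function, and performs an optimization step on the kept texts; the prompts, the number of iterations and `score_fn` are all hypothetical and reuse the `model`, `tokenizer` and `trainer` defined above.
```python
# hypothetical prompts and scoring function, for illustration only
prompts = ["The movie was", "I really enjoyed"]

def score_fn(text):
    return len(text)  # placeholder: replace with your own filtering criterion

for _ in range(10):  # arbitrary number of iterations
    inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=32, do_sample=True)
    texts = tokenizer.batch_decode(outputs, skip_special_tokens=True)

    # keep only the generations that pass the (placeholder) filter
    kept = [t for t in texts if score_fn(t) > 20]
    if kept:
        trainer.step(texts=kept)
```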
## IterativeTrainer
[[autodoc]] IterativeSFTTrainer
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/use_model.md | # Use model after training
Once you have trained a model using either the SFTTrainer, PPOTrainer, or DPOTrainer, you will have a fine-tuned model that can be used for text generation. In this section, we'll walk through the process of loading the fine-tuned model and generating text. If you need to run an inference server with the trained model, you can explore libraries such as [`text-generation-inference`](https://github.com/huggingface/text-generation-inference).
## Load and Generate
If you have fully fine-tuned a model, meaning without the use of PEFT, you can simply load it like any other language model in transformers. For example, the value head that was trained during PPO training is no longer needed, and if you load the model with the original transformers class it will be ignored:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
model_name_or_path = "kashif/stack-llama-2" #path/to/your/model/or/name/on/hub
device = "cpu" # or "cuda" if you have a GPU
model = AutoModelForCausalLM.from_pretrained(model_name_or_path).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
inputs = tokenizer.encode("This movie was really", return_tensors="pt").to(device)
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))
```
Alternatively you can also use the pipeline:
```python
from transformers import pipeline
model_name_or_path = "kashif/stack-llama-2" #path/to/your/model/or/name/on/hub
pipe = pipeline("text-generation", model=model_name_or_path)
print(pipe("This movie was really")[0]["generated_text"])
```
## Use Adapters with PEFT
If you trained with PEFT, load the base model first and then attach the adapter on top:
```python
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
base_model_name = "kashif/stack-llama-2"  # path/to/your/model/or/name/on/hub
adapter_model_name = "path/to/my/adapter"
model = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(model, adapter_model_name)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
```
You can also merge the adapters into the base model so you can use the model like a normal transformers model, however the checkpoint will be significantly bigger:
```python
model = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(model, adapter_model_name)
model = model.merge_and_unload()
model.save_pretrained("merged_adapters")
```
Once you have the model loaded, with the adapters either merged or kept separately on top, you can run generation as with a normal model, as outlined above.
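For instance, a minimal sketch that reuses the merged checkpoint saved above (`base_model_name` comes from the earlier snippet):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("merged_adapters")
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

inputs = tokenizer.encode("This movie was really", return_tensors="pt")
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))
```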
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/best_of_n.mdx | # Best of N sampling: Alternative ways to get better model output without RL based fine-tuning
Within the extras module is the `best-of-n` sampler class, which serves as an alternative way of generating better model output.
For how it fares against RL-based fine-tuning, please look in the `examples` directory for a comparison example.
## Usage
To get started quickly, instantiate an instance of the class with a model, a length sampler, a tokenizer and a callable that serves as a proxy reward pipeline that outputs reward scores for input queries
```python
from transformers import pipeline, AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler
from trl.extras import BestOfNSampler
model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)
reward_pipe = pipeline("sentiment-analysis", model=reward_model, device=device)
tokenizer = AutoTokenizer.from_pretrained(ref_model_name)
tokenizer.pad_token = tokenizer.eos_token
# callable that takes a list of raw text and returns a list of corresponding reward scores
def queries_to_scores(list_of_strings):
return [output["score"] for output in reward_pipe(list_of_strings)]
best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler)
```
And assuming you have a list/tensor of tokenized queries, you can generate better output by calling the `generate` method
```python
best_of_n.generate(query_tensors, device=device, **gen_kwargs)
```
The default sample size is 4, but you can change it at the time of instance initialization like so
```python
best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, sample_size=8)
```
The default output is the result of taking the top scored output for each query, but you can change it to top 2 and so on by passing the `n_candidates` argument at the time of instance initialization
```python
best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, n_candidates=2)
```
There is the option of setting the generation settings (like `temperature`, `pad_token_id`) at the time of instance creation as opposed to when calling the `generate` method.
This is done by passing a `GenerationConfig` from the `transformers` library at the time of initialization
```python
from transformers import GenerationConfig
generation_config = GenerationConfig(min_length=-1, top_k=0.0, top_p=1.0, do_sample=True, pad_token_id=tokenizer.eos_token_id)
best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, generation_config=generation_config)
best_of_n.generate(query_tensors, device=device)
```
Furthermore, at the time of initialization you can set the seed to control the repeatability of the generation process and the number of samples to generate for each query.
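For example (the `seed` keyword name is an assumption; check the class signature, while `sample_size` is the same argument shown above):
```python
best_of_n = BestOfNSampler(
    model,
    tokenizer,
    queries_to_scores,
    length_sampler=output_length_sampler,
    sample_size=8,  # number of samples generated per query
    seed=42,        # assumed keyword for controlling repeatability
)
```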
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/example_overview.md | # Examples
## Introduction
The examples should work in any of the following settings (with the same script):
- single GPU
- multi GPUs (using PyTorch distributed mode)
- multi GPUs (using DeepSpeed ZeRO-Offload stages 1, 2, & 3)
- fp16 (mixed-precision), fp32 (normal precision), or bf16 (bfloat16 precision)
To run it in each of these various modes, first initialize the accelerate
configuration with `accelerate config`
**NOTE:** to train with a 4-bit or 8-bit model, please run
```bash
pip install --upgrade trl[quantization]
```
## Accelerate Config
For all the examples, you'll need to generate a 🤗 Accelerate config file with:
```shell
accelerate config # will prompt you to define the training configuration
```
Then, it is encouraged to launch jobs with `accelerate launch`!
# Maintained Examples
| File | Description |
|------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
| [`examples/scripts/sft.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/sft.py) | This script shows how to use the `SFTTrainer` to fine tune a model or adapters into a target dataset. |
| [`examples/scripts/reward_modeling.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/reward_modeling.py) | This script shows how to use the `RewardTrainer` to train a reward model on your own dataset. |
| [`examples/scripts/ppo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) | This script shows how to use the `PPOTrainer` to fine-tune a sentiment analysis model using IMDB dataset |
| [`examples/scripts/ppo_multi_adapter.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo_multi_adapter.py) | This script shows how to use the `PPOTrainer` to train a single base model with multiple adapters. Requires you to run the example script with the reward model training beforehand. |
| [`examples/scripts/stable_diffusion_tuning_example.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/stable_diffusion_tuning_example.py) | This script shows how to use the `DDPOTrainer` to fine-tune a Stable Diffusion model using reinforcement learning. |
Here are also some easier-to-run colab notebooks that you can use to get started with TRL:
| File | Description |
|----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
| [`examples/notebooks/best_of_n.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/best_of_n.ipynb) | This notebook demonstrates how to use the "Best of N" sampling strategy using TRL when fine-tuning your model with PPO. |
| [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment.ipynb) | This notebook demonstrates how to reproduce the GPT2 imdb sentiment tuning example on a jupyter notebook. |
| [`examples/notebooks/gpt2-control.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-control.ipynb) | This notebook demonstrates how to reproduce the GPT2 sentiment control example on a jupyter notebook. |
We also have some other examples that are less maintained but can be used as a reference:
1. **[research_projects](https://github.com/huggingface/trl/tree/main/examples/research_projects)**: Check out this folder to find the scripts used for some research projects that used TRL (LM de-toxification, Stack-Llama, etc.)
## Distributed training
All of the scripts can be run on multiple GPUs by providing the path of an 🤗 Accelerate config file when calling `accelerate launch`. To launch one of them on one or multiple GPUs, run the following command (swapping `{NUM_GPUS}` with the number of GPUs in your machine and `--all_arguments_of_the_script` with your arguments.)
```shell
accelerate launch --config_file=examples/accelerate_configs/multi_gpu.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script
```
You can also adjust the parameters of the 🤗 Accelerate config file to suit your needs (e.g. training in mixed precision).
### Distributed training with DeepSpeed
Most of the scripts can be run on multiple GPUs together with DeepSpeed ZeRO-{1,2,3} for efficient sharding of the optimizer states, gradients, and model weights. To do so, run the following command (swapping `{NUM_GPUS}` with the number of GPUs in your machine, `--all_arguments_of_the_script` with your arguments, and `--deepspeed_config` with the path to the DeepSpeed config file such as `examples/deepspeed_configs/deepspeed_zero1.yaml`):
```shell
accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero{1,2,3}.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script
```
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/ddpo_trainer.mdx | # Denoising Diffusion Policy Optimization
## The why
| Before | After DDPO finetuning |
| --- | --- |
| <div style="text-align: center"><img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/pre_squirrel.png"/></div> | <div style="text-align: center"><img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/post_squirrel.png"/></div> |
| <div style="text-align: center"><img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/pre_crab.png"/></div> | <div style="text-align: center"><img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/post_crab.png"/></div> |
| <div style="text-align: center"><img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/pre_starfish.png"/></div> | <div style="text-align: center"><img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/post_starfish.png"/></div> |
## Getting started with Stable Diffusion finetuning with reinforcement learning
The machinery for finetuning of Stable Diffusion models with reinforcement learning makes heavy use of HuggingFace's `diffusers`
library. A reason for stating this is that getting started requires a bit of familiarity with the `diffusers` library concepts, mainly two of them - pipelines and schedulers.
Right out of the box (`diffusers` library), there isn't a `Pipeline` nor a `Scheduler` instance that is suitable for finetuning with reinforcement learning. Some adjustments need to be made.
There is a pipeline interface that is provided by this library that is required to be implemented to be used with the `DDPOTrainer`, which is the main machinery for fine-tuning Stable Diffusion with reinforcement learning. **Note: Only the StableDiffusion architecture is supported at this point.**
There is a default implementation of this interface that you can use out of the box. Assuming the default implementation is sufficient and/or to get things moving, refer to the training example alongside this guide.
The point of the interface is to fuse the pipeline and the scheduler into one object, which keeps all the constraints in one place. The interface was designed in the hope of catering to pipelines and schedulers beyond the examples in this repository and elsewhere at the time of writing. Also, the scheduler step is a method of this pipeline interface; this may seem redundant given that the raw scheduler is accessible via the interface, but it is the only way to constrain the scheduler step output to an output type befitting the algorithm at hand (DDPO).
For a more detailed look into the interface and the associated default implementation, go [here](https://github.com/lvwerra/trl/tree/main/trl/models/modeling_sd_base.py)
Note that the default implementation has a LoRA implementation path and a non-LoRA based implementation path. The LoRA flag is enabled by default and can be turned off by passing the corresponding flag. LoRA-based training is faster, and the LoRA-associated hyperparameters responsible for model convergence aren't as finicky as in non-LoRA based training.
In addition, you are expected to provide a reward function and a prompt function. The reward function is used to evaluate the generated images and the prompt function is used to generate the prompts that are used to generate the images.
## Getting started with `examples/scripts/ddpo.py`
The `ddpo.py` script is a working example of using the `DDPO` trainer to finetune a Stable Diffusion model. This example explicitly configures a small subset of the overall parameters associated with the config object (`DDPOConfig`).
**Note:** one A100 GPU is recommended to get this running. Anything below an A100 will not be able to run this example script, and even if it does with relatively smaller parameter settings, the results will most likely be poor.
Almost every configuration parameter has a default. There is only one commandline flag argument that is required of the user to get things up and running. The user is expected to have a [huggingface user access token](https://huggingface.co/docs/hub/security-tokens) that will be used to upload the model post finetuning to HuggingFace hub. The following bash command is to be entered to get things running
```bash
python ddpo.py --hf_user_access_token <token>
```
To obtain the documentation of `ddpo.py`, please run `python ddpo.py --help`
The following are things to keep in mind in general while configuring the trainer, beyond the use case of the example script (the code checks these constraints for you as well); a configuration sketch that satisfies them follows the list.
- The configurable sample batch size (`--ddpo_config.sample_batch_size=6`) should be greater than or equal to the configurable training batch size (`--ddpo_config.train_batch_size=3`)
- The configurable sample batch size (`--ddpo_config.sample_batch_size=6`) must be divisible by the configurable train batch size (`--ddpo_config.train_batch_size=3`)
- The configurable sample batch size (`--ddpo_config.sample_batch_size=6`) must be divisible by both the configurable gradient accumulation steps (`--ddpo_config.train_gradient_accumulation_steps=1`) and the configurable accelerator processes count
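As a rough illustration, here is a hypothetical `DDPOConfig` that satisfies all three constraints when running on a single accelerator process (the field names are taken from the command-line flags above; the values are placeholders):
```python
from trl import DDPOConfig

# 6 is divisible by the train batch size (3) and by
# train_gradient_accumulation_steps (1) * number of accelerator processes (1).
ddpo_config = DDPOConfig(
    sample_batch_size=6,
    train_batch_size=3,
    train_gradient_accumulation_steps=1,
)
```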
## Setting up the image logging hook function
Expect the function to be given a list of lists of the form
```python
[[image, prompt, prompt_metadata, rewards, reward_metadata], ...]
```
and `image`, `prompt`, `prompt_metadata`, `rewards`, `reward_metadata` are batched.
The last list in the lists of lists represents the last sample batch. You are likely to want to log this one.
While you are free to log however you want, the use of `wandb` or `tensorboard` is recommended.
### Key terms
- `rewards` : The reward/score is a numerical value associated with the generated image and is key to steering the RL process
- `reward_metadata` : The reward metadata is the metadata associated with the reward. Think of this as extra information payload delivered alongside the reward
- `prompt` : The prompt is the text that is used to generate the image
- `prompt_metadata` : The prompt metadata is the metadata associated with the prompt. A situation where this will not be empty is when the reward model comprises a [`FLAVA`](https://huggingface.co/docs/transformers/model_doc/flava) setup where questions and ground-truth answers (linked to the generated image) are expected alongside the generated image (See here: https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/rewards.py#L45)
- `image` : The image generated by the Stable Diffusion model
Example code for logging sampled images with `wandb` is given below.
```python
import numpy as np
from PIL import Image

# for logging these images to wandb
def image_outputs_hook(image_data, global_step, accelerate_logger):
# For the sake of this example, we only care about the last batch
# hence we extract the last element of the list
result = {}
images, prompts, _, rewards, _ = image_data[-1]
for i, image in enumerate(images):
pil = Image.fromarray(
(image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8)
)
pil = pil.resize((256, 256))
result[f"{prompts[i]:.25} | {rewards[i]:.2f}"] = [pil]
accelerate_logger.log_images(
result,
step=global_step,
)
```
### Using the finetuned model
Assuming you're done with all the epochs and have pushed your model up to the Hub, you can use the finetuned model as follows
```python
import torch
from trl import DefaultDDPOStableDiffusionPipeline
pipeline = DefaultDDPOStableDiffusionPipeline("metric-space/ddpo-finetuned-sd-model")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# memory optimization
pipeline.vae.to(device, torch.float16)
pipeline.text_encoder.to(device, torch.float16)
pipeline.unet.to(device, torch.float16)
prompts = ["squirrel", "crab", "starfish", "whale","sponge", "plankton"]
results = pipeline(prompts)
for prompt, image in zip(prompts,results.images):
image.save(f"{prompt}.png")
```
## Credits
This work is heavily influenced by the repo [here](https://github.com/kvablack/ddpo-pytorch) and the associated paper [Training Diffusion Models
with Reinforcement Learning by Kevin Black, Michael Janner, Yilun Du, Ilya Kostrikov, Sergey Levine](https://arxiv.org/abs/2305.13301).
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/dpo_trainer.mdx | # DPO Trainer
TRL supports the DPO Trainer for training language models from preference data, as described in the paper [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://arxiv.org/abs/2305.18290) by Rafailov et al., 2023. For a full example have a look at [`examples/scripts/dpo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/dpo.py).
The first step as always is to train your SFT model, to ensure the data we train on is in-distribution for the DPO algorithm.
## Expected dataset format
The DPO trainer expects a very specific format for the dataset, since the model will be trained to directly optimize the preference of which sentence is the most relevant, given two sentences. We provide an example from the [`Anthropic/hh-rlhf`](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset below:
<div style="text-align: center">
  <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/rlhf-antropic-example.png" width="50%">
</div>
Therefore the final dataset object should contain these 3 entries if you use the default `DPODataCollatorWithPadding` data collator. The entries should be named:
- `prompt`
- `chosen`
- `rejected`
for example:
```py
dpo_dataset_dict = {
"prompt": [
"hello",
"how are you",
"What is your name?",
"What is your name?",
"Which is the best programming language?",
"Which is the best programming language?",
"Which is the best programming language?",
],
"chosen": [
"hi nice to meet you",
"I am fine",
"My name is Mary",
"My name is Mary",
"Python",
"Python",
"Java",
],
"rejected": [
"leave me alone",
"I am not fine",
"Whats it to you?",
"I dont have a name",
"Javascript",
"C++",
"C++",
],
}
```
where the `prompt` contains the context inputs, `chosen` contains the corresponding chosen responses and `rejected` contains the corresponding negative (rejected) responses. As can be seen a prompt can have multiple responses and this is reflected in the entries being repeated in the dictionary's value arrays.
## Expected model format
The DPO trainer expects a model of `AutoModelForCausalLM`, compared to PPO that expects `AutoModelForCausalLMWithValueHead` for the value function.
## Using the `DPOTrainer`
For a detailed example have a look at the `examples/scripts/dpo.py` script. At a high level we need to initialize the `DPOTrainer` with a `model` we wish to train, a reference `ref_model` which we will use to calculate the implicit rewards of the preferred and rejected responses, the `beta` hyperparameter of the implicit reward, and the dataset containing the 3 entries listed above. Note that the `model` and `ref_model` need to have the same architecture (i.e. decoder-only or encoder-decoder).
```py
dpo_trainer = DPOTrainer(
model,
model_ref,
args=training_args,
beta=0.1,
train_dataset=train_dataset,
tokenizer=tokenizer,
)
```
After this one can then call:
```py
dpo_trainer.train()
```
Note that the `beta` is the temperature parameter for the DPO loss, typically something in the range of `0.1` to `0.5`. We ignore the reference model as `beta` -> 0.
## Loss functions
Given the preference data, we can fit a binary classifier according to the Bradley-Terry model and in fact the DPO authors propose the sigmoid loss on the normalized likelihood via the `logsigmoid` to fit a logistic regression.
The [RSO](https://arxiv.org/abs/2309.06657) authors propose to use a hinge loss on the normalized likelihood from the [SLiC](https://arxiv.org/abs/2305.10425) paper. The `DPOTrainer` can be switched to this loss via the `loss_type="hinge"` argument and the `beta` in this case is the reciprocal of the margin.
The [IPO](https://arxiv.org/abs/2310.12036) authors provide a deeper theoretical understanding of the DPO algorithms and identify an issue with overfitting and propose an alternative loss which can be used via the `loss_type="ipo"` argument to the trainer.
The [cDPO](https://ericmitchell.ai/cdpo.pdf) is a tweak on the DPO loss where we assume that the preference labels are noisy with some probability that can be passed to the `DPOTrainer` via `label_smoothing` argument (between 0 and 0.5) and then a conservative DPO loss is used. Use the `loss_type="cdpo"` argument to the trainer to use it.
The [KTO](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf) loss is derived to directly maximize the utility of LLM generations instead of the log-likelihood of preferences. Thus the dataset does not necessarily have to contain preferences, but rather desirable vs. undesirable completions. For the paired preference data required by the `DPOTrainer`, use the `loss_type="kto_pair"` argument to the trainer to utilize this loss, while for the more general case of desirable and undesirable data, use the as of yet unimplemented `KTOTrainer`.
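Switching the loss is just a matter of passing the corresponding argument when constructing the trainer; a minimal sketch reusing the variables from the earlier snippet:
```python
# Hinge loss from the RSO paper; `beta` is then the reciprocal of the margin.
dpo_trainer = DPOTrainer(
    model,
    model_ref,
    args=training_args,
    beta=0.1,
    loss_type="hinge",  # or "ipo", "cdpo", "kto_pair" as described above
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
```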
## Logging
While training and evaluating we record the following reward metrics:
* `rewards/chosen`: the mean difference between the log probabilities of the policy model and the reference model for the chosen responses scaled by beta
* `rewards/rejected`: the mean difference between the log probabilities of the policy model and the reference model for the rejected responses scaled by beta
* `rewards/accuracies`: mean of how often the chosen rewards are > than the corresponding rejected rewards
* `rewards/margins`: the mean difference between the chosen and corresponding rejected rewards
## Accelerate DPO fine-tuning using `unsloth`
You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc) and Mistral architectures. Some benchmarks for DPO listed below:
| GPU | Model | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved |
|----------|-----------------|-----------|------|------------------------|-----------------|----------------|
| A100 40G | Zephyr 7b | Ultra Chat| 1x | 1.24x | **1.88x** | -11.6% |
| Tesla T4 | Zephyr 7b | Ultra Chat| 1x | 1.09x | **1.55x** | -18.6% |
First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows:
```python
import torch
from transformers import TrainingArguments
from trl import DPOTrainer
from unsloth import FastLanguageModel
max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number.
# Load model
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "unsloth/zephyr-sft",
max_seq_length = max_seq_length,
dtype = None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True, # Use 4bit quantization to reduce memory usage. Can be False.
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
# Do model patching and add fast LoRA weights
model = FastLanguageModel.get_peft_model(
model,
r = 16,
target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj",],
lora_alpha = 16,
lora_dropout = 0, # Dropout = 0 is currently optimized
bias = "none", # Bias = "none" is currently optimized
use_gradient_checkpointing = True,
random_state = 3407,
)
training_args = TrainingArguments(output_dir="./output")
dpo_trainer = DPOTrainer(
model,
    ref_model=None,
args=training_args,
beta=0.1,
train_dataset=train_dataset,
tokenizer=tokenizer,
)
dpo_trainer.train()
```
The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth).
## Reference model considerations with PEFT
You have three main options (plus several variants) for how the reference model works when using PEFT, assuming the model that you would like to further enhance with DPO was tuned using (Q)LoRA.
1. Simply create two instances of the model, each loading your adapter - works fine but is very inefficient.
2. Merge the adapter into the base model, create another adapter on top, then leave the `ref_model` param null, in which case DPOTrainer will unload the adapter for reference inference - efficient, but has potential downsides discussed below.
3. Load the adapter twice with different names, then use `set_adapter` during training to swap between the adapter being DPO'd and the reference adapter - slightly less efficient compared to 2 (~adapter size VRAM overhead), but avoids the pitfalls.
### Downsides to merging QLoRA before DPO (approach 2)
As suggested by [Tim Dettmers](https://twitter.com/Tim_Dettmers/status/1694654191325573456), the best option for merging QLoRA adapters is to first quantize the base model, merge the adapter, then convert back to bf16. Something similar to [this script](https://github.com/jondurbin/qlora/blob/main/qmerge.py)
You can also just merge the adapters the standard way without quantizing the base model, but then you have 1-2% reduced performance (and evidently, more issues with empty responses).
If you use the recommended approach, which quantizes the model, you're now in a situation where to use QLoRA for DPO, you will need to re-quantize the merged model again or use an unquantized merge with lower overall performance.
### Using option 3 - load the adapter twice
To avoid the downsides with option 2, at the expense of slightly increased VRAM, you can load your fine-tuned adapter into the model twice, with different names, and set the model/ref adapter names in DPOTrainer.
For example:
```python
# Load the base model.
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained(
"mistralai/mixtral-8x7b-v0.1",
load_in_4bit=True,
quantization_config=bnb_config,
attn_implementation="flash_attention_2",
torch_dtype=torch.bfloat16,
device_map="auto",
)
model.config.use_cache = False
# Load the adapter.
model = PeftModel.from_pretrained(
model,
"/path/to/peft",
is_trainable=True,
adapter_name="train",
)
# Load the adapter a second time, with a different name, which will be our reference model.
model.load_adapter("/path/to/peft", adapter_name="reference")
# Initialize the trainer, without a ref_model param.
dpo_trainer = DPOTrainer(
model,
    ...,
model_adapter_name="train",
ref_adapter_name="reference",
)
```
## DPOTrainer
[[autodoc]] DPOTrainer
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/_toctree.yml | - sections:
- local: index
title: TRL
- local: quickstart
title: Quickstart
- local: installation
title: Installation
- local: how_to_train
title: PPO Training FAQ
- local: use_model
title: Use Trained Models
- local: customization
title: Customize the Training
- local: logging
title: Understanding Logs
title: Get started
- sections:
- local: models
title: Model Classes
- local: trainer
title: Trainer Classes
- local: reward_trainer
title: Reward Model Training
- local: sft_trainer
title: Supervised Fine-Tuning
- local: ppo_trainer
title: PPO Trainer
- local: best_of_n
title: Best of N Sampling
- local: dpo_trainer
title: DPO Trainer
- local: ddpo_trainer
title: Denoising Diffusion Policy Optimization
- local: iterative_sft_trainer
title: Iterative Supervised Fine-Tuning
- local: text_environments
title: Text Environments
title: API
- sections:
- local: example_overview
title: Example Overview
- local: sentiment_tuning
title: Sentiment Tuning
- local: lora_tuning_peft
title: Training with PEFT
- local: detoxifying_a_lm
title: Detoxifying a Language Model
- local: using_llama_models
title: Training StackLlama
- local: learning_tools
title: Learning to Use Tools
- local: multi_adapter_rl
title: Multi Adapter RLHF
title: Examples
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/trainer.mdx | # Trainer
At TRL we support PPO (Proximal Policy Optimisation) with an implementation that largely follows the structure introduced in the paper "Fine-Tuning Language Models from Human Preferences" by D. Ziegler et al. [[paper](https://arxiv.org/pdf/1909.08593.pdf), [code](https://github.com/openai/lm-human-preferences)].
The Trainer and model classes are largely inspired from `transformers.Trainer` and `transformers.AutoModel` classes and adapted for RL.
We also support a `RewardTrainer` that can be used to train a reward model.
## PPOConfig
[[autodoc]] PPOConfig
## PPOTrainer
[[autodoc]] PPOTrainer
## RewardConfig
[[autodoc]] RewardConfig
## RewardTrainer
[[autodoc]] RewardTrainer
## SFTTrainer
[[autodoc]] SFTTrainer
## DPOTrainer
[[autodoc]] DPOTrainer
## DDPOConfig
[[autodoc]] DDPOConfig
## DDPOTrainer
[[autodoc]] DDPOTrainer
## IterativeSFTTrainer
[[autodoc]] IterativeSFTTrainer
## set_seed
[[autodoc]] set_seed
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/using_llama_models.mdx | # Using LLaMA models with TRL
We've begun rolling out examples to use Meta's LLaMA models in `trl` (see [Meta's LLaMA release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) for the original LLaMA model).
## Efficient training strategies
Even training the smallest LLaMA model requires an enormous amount of memory. Some quick math: in bf16, every parameter uses 2 bytes (in fp32 4 bytes) in addition to 8 bytes used, e.g., in the Adam optimizer (see the [performance docs](https://huggingface.co/docs/transformers/perf_train_gpu_one#optimizer) in Transformers for more info). So a 7B parameter model would use `(2+8)*7B=70GB` just to fit in memory and would likely need more when you compute intermediate values such as attention scores. So you couldn't train the model even on a single 80GB A100 like that. You can use some tricks, like more efficient optimizers or half-precision training, to squeeze a bit more into memory, but you'll run out sooner or later.
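The back-of-the-envelope estimate above is easy to reproduce; a small, purely illustrative helper:
```python
def full_finetune_memory_gb(n_params_billion, weight_bytes=2, optimizer_bytes=8):
    # Rough lower bound in GB: bf16 weights (2 bytes/param) plus Adam states
    # (8 bytes/param), ignoring gradients, activations and attention scores.
    return (weight_bytes + optimizer_bytes) * n_params_billion

print(full_finetune_memory_gb(7))  # 70 (GB) for a 7B model, as in the text
```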
Another option is to use Parameter-Efficient Fine-Tuning (PEFT) techniques, such as the [`peft`](https://github.com/huggingface/peft) library, which can perform low-rank adaptation (LoRA) on a model loaded in 8-bit.
For more on `peft` + `trl`, see the [docs](https://huggingface.co/docs/trl/sentiment_tuning_peft).
Loading the model in 8bit reduces the memory footprint drastically since you only need one byte per parameter for the weights (e.g. a 7B LLaMA model takes 7GB in memory).
Instead of training the original weights directly, LoRA adds small adapter layers on top of some specific layers (usually the attention layers); thus, the number of trainable parameters is drastically reduced.
In this scenario, a rule of thumb is to allocate ~1.2-1.4GB per billion parameters (depending on the batch size and sequence length) to fit the entire fine-tuning setup.
This enables fine-tuning larger models (up to 50-60B scale models on a NVIDIA A100 80GB) at low cost.
Now we can fit very large models into a single GPU, but the training might still be very slow.
The simplest strategy in this scenario is data parallelism: we replicate the same training setup into separate GPUs and pass different batches to each GPU.
With this, you can parallelize the forward/backward passes of the model and scale with the number of GPUs.

We use either the `transformers.Trainer` or `accelerate`, which both support data parallelism without any code changes, by simply passing arguments when calling the scripts with `torchrun` or `accelerate launch`. The following runs a training script with 8 GPUs on a single machine with `accelerate` and `torchrun`, respectively.
```bash
accelerate launch --multi_gpu --num_machines 1 --num_processes 8 my_accelerate_script.py
torchrun --nnodes 1 --nproc_per_node 8 my_torch_script.py
```
## Supervised fine-tuning
Before we start training reward models and tuning our model with RL, it helps if the model is already good in the domain we are interested in.
In our case, we want it to answer questions, while for other use cases, we might want it to follow instructions, in which case instruction tuning is a great idea.
The easiest way to achieve this is by continuing to train the language model with the language modeling objective on texts from the domain or task.
The [StackExchange dataset](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences) is enormous (over 10 million instructions), so we can easily train the language model on a subset of it.
There is nothing special about fine-tuning the model before doing RLHF - it’s just the causal language modeling objective from pretraining that we apply here.
To use the data efficiently, we use a technique called packing: instead of having one text per sample in the batch and then padding to either the longest text or the maximal context of the model, we concatenate a lot of texts with an EOS token in between and cut chunks of the context size to fill the batch without any padding.

With this approach the training is much more efficient as each token that is passed through the model is also trained in contrast to padding tokens which are usually masked from the loss.
If you don't have much data and are more concerned about occasionally cutting off some tokens that are overflowing the context you can also use a classical data loader.
The packing is handled by the `ConstantLengthDataset` and we can then use the `Trainer` after loading the model with `peft`. First, we load the model in int8, prepare it for training, and then add the LoRA adapters.
```python
from accelerate import Accelerator
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM

# load model in 8bit
model = AutoModelForCausalLM.from_pretrained(
args.model_path,
load_in_8bit=True,
device_map={"": Accelerator().local_process_index}
)
model = prepare_model_for_kbit_training(model)
# add LoRA to model
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
```
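The packing mentioned above is handled by `ConstantLengthDataset`; a minimal sketch (the column names and formatting function are assumptions for a question-answering dataset; adapt them to your own schema):
```python
from trl.trainer import ConstantLengthDataset

def prepare_sample_text(example):
    # assumed "question"/"answer" columns; change to match your dataset
    return f"Question: {example['question']}\n\nAnswer: {example['answer']}"

train_dataset = ConstantLengthDataset(
    tokenizer,
    dataset["train"],
    formatting_func=prepare_sample_text,
    seq_length=1024,
    infinite=True,
)
```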
We train the model for a few thousand steps with the causal language modeling objective and save the model.
Since we will tune the model again with different objectives, we merge the adapter weights with the original model weights.
**Disclaimer:** due to LLaMA's license, we release only the adapter weights for this and the model checkpoints in the following sections.
You can apply for access to the base model's weights by filling out Meta AI's [form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) and then converting them to the 🤗 Transformers format by running this [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py).
Note that you'll also need to install 🤗 Transformers from source until the `v4.28` is released.
Now that we have fine-tuned the model for the task, we are ready to train a reward model.
## Reward modeling and human preferences
In principle, we could fine-tune the model using RLHF directly with the human annotations.
However, this would require us to send some samples to humans for rating after each optimization iteration.
This is expensive and slow due to the number of training samples needed for convergence and the inherent latency of human reading and annotator speed.
A trick that works well instead of direct feedback is training a reward model on human annotations collected before the RL loop.
The goal of the reward model is to imitate how a human would rate a text. There are several possible strategies to build a reward model: the most straightforward way would be to predict the annotation (e.g. a rating score or a binary value for “good”/”bad”).
In practice, what works better is to predict the ranking of two examples, where the reward model is presented with two candidates `(y_k, y_j)` for a given prompt `x` and has to predict which one would be rated higher by a human annotator.
With the StackExchange dataset, we can infer which of the two answers was preferred by the users based on the score.
With that information and a pairwise ranking loss (shown in the code below), we can then modify the `transformers.Trainer` by adding a custom loss function.
```python
import torch.nn as nn
from transformers import Trainer

class RewardTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
if return_outputs:
return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
return loss
```
We utilize a subset of 100,000 pairs of candidates and evaluate on a held-out set of 50,000. With a modest training batch size of 4, we train the Llama model using the LoRA `peft` adapter for a single epoch using the Adam optimizer with BF16 precision. Our LoRA configuration is:
```python
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
```
As detailed in the next section, the resulting adapter can be merged into the frozen model and saved for further downstream use.
## Reinforcement Learning from Human Feedback
With the fine-tuned language model and the reward model at hand, we are now ready to run the RL loop. It follows roughly three steps:
1. Generate responses from prompts,
2. Rate the responses with the reward model,
3. Run a reinforcement learning policy-optimization step with the ratings.
The Query and Response prompts are templated as follows before being tokenized and passed to the model:
```bash
Question: <Query>
Answer: <Response>
```
The same template was used for SFT, RM and RLHF stages.
Once more, we utilize `peft` for memory-efficient training, which offers an extra advantage in the RLHF context.
Here, the reference model and policy share the same base, the SFT model, which we load in 8-bit and freeze during training.
We exclusively optimize the policy's LoRA weights using PPO while sharing the base model's weights.
```python
for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
question_tensors = batch["input_ids"]
    # sample from the policy to generate responses
response_tensors = ppo_trainer.generate(
question_tensors,
return_prompt=False,
length_sampler=output_length_sampler,
**generation_kwargs,
)
batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)
# Compute sentiment score
texts = [q + r for q, r in zip(batch["query"], batch["response"])]
pipe_outputs = sentiment_pipe(texts, **sent_kwargs)
rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]
# Run PPO step
stats = ppo_trainer.step(question_tensors, response_tensors, rewards)
# Log stats to Wandb
ppo_trainer.log_stats(stats, batch, rewards)
```
For the rest of the details and evaluation, please refer to our [blog post on StackLLaMA](https://huggingface.co/blog/stackllama).
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/quickstart.mdx | # Quickstart
## How does it work?
Fine-tuning a language model via PPO consists of roughly three steps:
1. **Rollout**: The language model generates a response or continuation based on a query which could be the start of a sentence.
2. **Evaluation**: The query and response are evaluated with a function, model, human feedback, or some combination of them. The important thing is that this process should yield a scalar value for each query/response pair. The optimization will aim at maximizing this value.
3. **Optimization**: This is the most complex part. In the optimisation step the query/response pairs are used to calculate the log-probabilities of the tokens in the sequences. This is done with the model that is trained and a reference model, which is usually the pre-trained model before fine-tuning. The KL-divergence between the two outputs is used as an additional reward signal to make sure the generated responses don't deviate too far from the reference language model. The active language model is then trained with PPO.
The full process is illustrated in the following figure:
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_overview.png"/>
## Minimal example
The following code illustrates the steps above.
```python
# 0. imports
import torch
from transformers import GPT2Tokenizer
from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
model_ref = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
# 2. initialize trainer
ppo_config = {"batch_size": 1}
config = PPOConfig(**ppo_config)
ppo_trainer = PPOTrainer(config, model, model_ref, tokenizer)
# 3. encode a query
query_txt = "This morning I went to the "
query_tensor = tokenizer.encode(query_txt, return_tensors="pt").to(model.pretrained_model.device)
# 4. generate model response
generation_kwargs = {
"min_length": -1,
"top_k": 0.0,
"top_p": 1.0,
"do_sample": True,
"pad_token_id": tokenizer.eos_token_id,
"max_new_tokens": 20,
}
response_tensor = ppo_trainer.generate([item for item in query_tensor], return_prompt=False, **generation_kwargs)
response_txt = tokenizer.decode(response_tensor[0])
# 5. define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0, device=model.pretrained_model.device)]
# 6. train model with ppo
train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)
```
In general, you would run steps 3-6 in a for-loop and run it on many diverse queries. You can find more realistic examples in the examples section.
## How to use a trained model
After training an `AutoModelForCausalLMWithValueHead`, you can directly use the model in `transformers`.
```python
# .. Let's assume we have a trained model using `PPOTrainer` and `AutoModelForCausalLMWithValueHead`
# push the model on the Hub
model.push_to_hub("my-fine-tuned-model-ppo")
# or save it locally
model.save_pretrained("my-fine-tuned-model-ppo")
# load the model from the Hub
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("my-fine-tuned-model-ppo")
```
You can also load your model with `AutoModelForCausalLMWithValueHead` if you want to use the value head, for example to continue training.
```python
from trl import AutoModelForCausalLMWithValueHead
model = AutoModelForCausalLMWithValueHead.from_pretrained("my-fine-tuned-model-ppo")
```
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/how_to_train.md | # Training FAQ
## What Metrics Should I Look at?
When performing classical supervised fine-tuning of language models, the loss (especially the validation loss) serves as a good indicator of the training progress. However, in Reinforcement Learning (RL), the loss becomes less informative about the model's performance, and its value may fluctuate while the actual performance improves.
To address this, we recommend focusing on two key metrics first:
**Mean Reward**: The primary goal is to maximize the reward achieved by the model during RL training.
**Objective KL Divergence**: KL divergence (Kullback-Leibler divergence) measures the dissimilarity between two probability distributions. In the context of RL training, we use it to quantify the difference between the current model and a reference model. Ideally, we want to keep the KL divergence between 0 and 10 to ensure the model's generated text remains close to what the reference model produces.
However, there are more metrics that can be useful for debugging, checkout the [logging section](logging).
## Why Do We Use a Reference Model, and What's the Purpose of KL Divergence?
When training RL models, optimizing solely for reward may lead to unexpected behaviors, where the model exploits the environment in ways that don't align with good language generation. In the case of RLHF, we use a reward model trained to predict whether a generated text is highly ranked by humans.
However, the RL model being optimized against the reward model may learn patterns that yield high reward but do not represent good language. This can result in extreme cases where the model generates texts with excessive exclamation marks or emojis to maximize the reward. In some worst-case scenarios, the model may generate patterns completely unrelated to natural language yet receive high rewards, similar to adversarial attacks.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/kl-example.png">
<p style="text-align: center;"> <b>Figure:</b> Samples without a KL penalty from <a href="https://arxiv.org/pdf/1909.08593.pdf">https://arxiv.org/pdf/1909.08593.pdf</a>. </p>
</div>
To address this issue, we add a penalty to the reward function based on the KL divergence between the current model and the reference model. By doing this, we encourage the model to stay close to what the reference model generates.
## What Is the Concern with Negative KL Divergence?
If you generate text by purely sampling from the model distribution things work fine in general. But when you use the `generate` method there are a few caveats, because it does not always purely sample depending on the settings, which can cause the KL-divergence to go negative. Essentially, when the active model achieves `log_p_token_active < log_p_token_ref` we get negative KL-div. This can happen in several cases:
- **top-k sampling**: the model can smooth out the probability distribution, causing the top-k tokens to have a smaller probability than those of the reference model while still being selected
- **min_length**: this ignores the EOS token until `min_length` is reached, so the model can assign a very low log prob to the EOS token and very high probs to all other tokens until `min_length` is reached
These are just a few examples. Why is negative KL an issue? The total reward `R` is computed `R = r - beta * KL` so if the model can learn how to drive KL-divergence negative it effectively gets a positive reward. In many cases it can be much easier to exploit such a bug in the generation than actually learning the reward function. In addition the KL can become arbitrarily small thus the actual reward can be very small compared to it.
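To make the formula concrete, here is a toy per-sequence computation of the penalized reward (a schematic sketch, not the exact estimator used inside the trainer):
```python
import torch

# Per-token log probs of the response under the active and reference models (toy numbers).
logp_active = torch.tensor([-1.6, -1.0, -2.1])
logp_ref = torch.tensor([-1.5, -0.9, -2.0])

kl = (logp_active - logp_ref).sum()  # negative here: the active model is less confident everywhere
beta, r = 0.2, 1.0
R = r - beta * kl                    # R = r - beta * KL from the text; negative KL inflates R
print(kl.item(), R.item())           # approximately -0.3 and 1.06
```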
So how should you generate text for PPO training? Let's have a look!
## How to generate text for training?
In order to avoid the KL issues described above we recommend to use the following settings:
```python
generation_kwargs = {
"min_length": -1, # don't ignore the EOS token (see above)
"top_k": 0.0, # no top-k sampling
"top_p": 1.0, # no nucleus sampling
"do_sample": True, # yes, we want to sample
"pad_token_id": tokenizer.eos_token_id, # most decoder models don't have a padding token - use EOS token instead
"max_new_tokens": 32, # specify how many tokens you want to generate at most
}
```
With these settings we usually don't encounter any issues. You can also experiment with other settings, but if you encounter issues with negative KL-divergence, try going back to these and see if the issues persist.
## How can you debug your own use-case?
Debugging the RL pipeline can be challenging due to its complexity. Here are some tips and suggestions to make the process easier:
- **Start from a working example**: Begin with a working example from the trl repository and gradually modify it to fit your specific use-case. Changing everything at once can make it difficult to identify the source of potential issues. For example, you can start by replacing the model in the example and once you figure out the best hyperparameters try to switch to your dataset and reward model. If you change everything at once you won't know where a potential problem comes from.
- **Start small, scale later**: Training large models can be very slow and take several hours or days until you see any improvement. For debugging this is not a convenient timescale so try to use small model variants during the development phase and scale up once that works. That being said you sometimes have to be careful as small models might not have the capacity to solve a complicated task either.
- **Start simple**: Try to start with a minimal example and build complexity from there. Your use-case might require for example a complicated reward function consisting of many different rewards - try to use one signal first and see if you can optimize that and then add more complexity after that.
- **Inspect the generations**: It's always a good idea to inspect what the model is generating. Maybe there is a bug in your post-processing or your prompt. Due to bad settings you might cut off generations too soon. These things are very hard to see in the metrics but very obvious if you look at the generations.
- **Inspect the reward model**: If your reward is not improving over time, maybe there's an issue with the reward model. You can look at extreme cases to see if it does what it should: e.g. in the sentiment case you can check if simple positive and negative examples really get different rewards. And you can look at the distribution of your dataset. Finally, maybe the reward is dominated by the query, which the model can't affect, so you might need to normalize this (e.g. reward of query+response minus reward of the query).
These are just a few tips that we find helpful - if you have more useful tricks feel free to open a PR to add them as well!
| 0 |
hf_public_repos/trl/docs | hf_public_repos/trl/docs/source/lora_tuning_peft.mdx | # Examples of using peft with trl to finetune 8-bit models with Low Rank Adaption (LoRA)
The notebooks and scripts in these examples show how to use Low Rank Adaptation (LoRA) to fine-tune models in a memory-efficient manner. Most of the PEFT methods in the `peft` library are supported, but note that some, such as prompt tuning, are not.
For more information on LoRA, see the [original paper](https://arxiv.org/abs/2106.09685).
Here's an overview of the `peft`-enabled notebooks and scripts in the [trl repository](https://github.com/huggingface/trl/tree/main/examples):
| File | Task | Description | Colab link |
|---|---|---|---|
| [`stack_llama/rl_training.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama/scripts/rl_training.py) | RLHF | Distributed fine-tuning of the 7b parameter LLaMA models with a learned reward model and `peft`. | |
| [`stack_llama/reward_modeling.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama/scripts/reward_modeling.py) | Reward Modeling | Distributed training of the 7b parameter LLaMA reward model with `peft`. | |
| [`stack_llama/supervised_finetuning.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama/scripts/supervised_finetuning.py) | SFT | Distributed instruction/supervised fine-tuning of the 7b parameter LLaMA model with `peft`. | |
## Installation
Note: peft is in active development, so we install directly from their Github page.
Peft also relies on the latest version of transformers.
```bash
pip install trl[peft]
pip install bitsandbytes loralib
pip install git+https://github.com/huggingface/transformers.git@main
#optional: wandb
pip install wandb
```
Note: if you don't want to log with `wandb` remove `log_with="wandb"` in the scripts/notebooks. You can also replace it with your favourite experiment tracker that's [supported by `accelerate`](https://huggingface.co/docs/accelerate/usage_guides/tracking).
## How to use it?
Simply declare a `PeftConfig` object in your script and pass it through `.from_pretrained` to load the TRL+PEFT model.
```python
from peft import LoraConfig
from trl import AutoModelForCausalLMWithValueHead
model_id = "edbeeching/gpt-neo-125M-imdb"
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = AutoModelForCausalLMWithValueHead.from_pretrained(
model_id,
peft_config=lora_config,
)
```
And if you want to load your model in 8bit precision:
```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
config.model_name,
load_in_8bit=True,
peft_config=lora_config,
)
```
... or in 4bit precision:
```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
config.model_name,
peft_config=lora_config,
load_in_4bit=True,
)
```
## Launch scripts
The `trl` library is powered by `accelerate`. As such it is best to configure and launch trainings with the following commands:
```bash
accelerate config # will prompt you to define the training configuration
accelerate launch scripts/gpt2-sentiment_peft.py # launches training
```
## Using `trl` + `peft` and Data Parallelism
You can scale up to as many GPUs as you want, as long as you are able to fit the training process in a single device. The only tweak you need to apply is to load the model as follows:
```python
from peft import LoraConfig
...
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
config.model_name,
peft_config=lora_config,
)
```
And if you want to load your model in 8bit precision:
```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
config.model_name,
peft_config=lora_config,
load_in_8bit=True,
)
```
... or in 4bit precision:
```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
config.model_name,
peft_config=lora_config,
load_in_4bit=True,
)
```
Finally, make sure that the rewards are computed on the correct device as well; for that you can use `ppo_trainer.model.current_device`.
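For instance, when your reward pipeline returns plain floats, you could build the reward tensors like this (a small sketch; `ppo_trainer` and `scores` are assumed to exist in your training loop):
```python
import torch

device = ppo_trainer.model.current_device
rewards = [torch.tensor(score, device=device) for score in scores]
```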
## Naive pipeline parallelism (NPP) for large models (>60B models)
The `trl` library also supports naive pipeline parallelism (NPP) for large models (>60B models). In this paradigm, the model and the adapters are loaded across multiple GPUs and the activations and gradients are naively communicated across the GPUs. This supports `int8` models as well as other `dtype` models.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-npp.png">
</div>
### How to use NPP?
Simply load your model with a custom `device_map` argument passed to `from_pretrained` to split your model across multiple devices. Check out this [nice tutorial](https://github.com/huggingface/blog/blob/main/accelerate-large-models.md) on how to properly create a `device_map` for your model.
Also make sure to have the `lm_head` module on the first GPU device, as an error may be thrown otherwise. At the time of writing, you need to install the `main` branch of `accelerate`: `pip install git+https://github.com/huggingface/accelerate.git@main` and `peft`: `pip install git+https://github.com/huggingface/peft.git@main`.
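For illustration only (the model id and memory budgets are placeholders, and whether `"auto"` keeps `lm_head` on GPU 0 depends on your model, so you may need an explicit `{module_name: device}` dict):
```python
from peft import LoraConfig
from trl import AutoModelForCausalLMWithValueHead

lora_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM"
)

# Split the base model across the available GPUs; the LoRA adapters are added on top.
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    "huggyllama/llama-65b",               # placeholder model id
    peft_config=lora_config,
    load_in_8bit=True,                    # NPP also works with int8 weights
    device_map="auto",                    # or an explicit {module_name: device} dict
    max_memory={0: "20GiB", 1: "20GiB"},  # placeholder per-GPU budgets
)
```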
### Launch scripts
Although the `trl` library is powered by `accelerate`, you should run your training script in a single process. Note that we do not support Data Parallelism together with NPP yet.
```bash
python PATH_TO_SCRIPT
```
## Fine-tuning Llama-2 model
You can easily fine-tune the Llama 2 model using `SFTTrainer` and the official script! For example, to fine-tune llama2-7b on the Guanaco dataset, run (tested on a single NVIDIA T4-16GB):
```bash
python examples/scripts/sft.py --model_name meta-llama/Llama-2-7b-hf --dataset_name timdettmers/openassistant-guanaco --load_in_4bit --use_peft --batch_size 4 --gradient_accumulation_steps 2
```
| 0 |
hf_public_repos/trl/docker | hf_public_repos/trl/docker/trl-source-gpu/Dockerfile | # Builds GPU docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.10
# Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN apt-get update && \
apt-get install -y curl git wget software-properties-common git-lfs && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Install audio-related libraries
RUN apt-get update && \
apt install -y ffmpeg
RUN apt install -y libsndfile1-dev
RUN git lfs install
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN conda create --name trl python=${PYTHON_VERSION} ipython jupyter pip
RUN python3 -m pip install --no-cache-dir --upgrade pip
# Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/trl/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Stage 2
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
RUN source activate trl && \
python3 -m pip install --no-cache-dir bitsandbytes optimum auto-gptq
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Activate the conda env and install transformers + accelerate from source
RUN source activate trl && \
python3 -m pip install -U --no-cache-dir \
librosa \
"soundfile>=0.12.1" \
scipy \
git+https://github.com/huggingface/transformers \
git+https://github.com/huggingface/accelerate \
git+https://github.com/huggingface/peft \
trl[test]@git+https://github.com/huggingface/trl
RUN source activate trl && \
pip freeze | grep transformers
RUN echo "source activate trl" >> ~/.profile
# Activate the virtualenv
CMD ["/bin/bash"] | 0 |
hf_public_repos/trl/docker | hf_public_repos/trl/docker/trl-latest-gpu/Dockerfile | # Builds GPU docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.10
# Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN apt-get update && \
apt-get install -y curl git wget software-properties-common git-lfs && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Install audio-related libraries
RUN apt-get update && \
apt install -y ffmpeg
RUN apt install -y libsndfile1-dev
RUN git lfs install
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN conda create --name trl python=${PYTHON_VERSION} ipython jupyter pip
RUN python3 -m pip install --no-cache-dir --upgrade pip
# Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/trl/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Stage 2
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
RUN source activate trl && \
python3 -m pip install --no-cache-dir bitsandbytes optimum auto-gptq
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Activate the conda env and install transformers + accelerate from source
RUN source activate trl && \
python3 -m pip install -U --no-cache-dir \
librosa \
"soundfile>=0.12.1" \
scipy \
transformers \
accelerate \
peft \
trl[test]@git+https://github.com/huggingface/trl
RUN source activate trl && \
pip freeze | grep trl
RUN echo "source activate trl" >> ~/.profile
# Activate the virtualenv
CMD ["/bin/bash"] | 0 |
hf_public_repos/trl | hf_public_repos/trl/commands/run_sft.sh | #!/bin/bash
# This script runs an SFT example end-to-end on a tiny model using different possible configurations
# but defaults to QLoRA + PEFT
OUTPUT_DIR="test_sft/"
MODEL_NAME="HuggingFaceM4/tiny-random-LlamaForCausalLM"
DATASET_NAME="imdb"
MAX_STEPS=5
BATCH_SIZE=2
SEQ_LEN=128
# Handle extra arguments in case one passes accelerate configs.
EXTRA_ACCELERATE_ARGS=""
EXTRA_TRAINING_ARGS="""--use_peft \
--load_in_4bit
"""
# Set your number of GPUs here
NUM_GPUS=2
if [[ "${TRL_ACCELERATE_CONFIG}" == "" ]]; then
EXTRA_ACCELERATE_ARGS=""
else
EXTRA_ACCELERATE_ARGS="--config_file $TRL_ACCELERATE_CONFIG"
# For DeepSpeed configs we need to set the `--fp16` flag to comply with our configs exposed
# in `examples/accelerate_configs`, as our runners do not support bf16 mixed precision training.
if [[ $TRL_ACCELERATE_CONFIG == *"deepspeed"* ]]; then
EXTRA_TRAINING_ARGS="--fp16"
else
echo "Keeping QLoRA + PEFT"
fi
fi
CMD="""
accelerate launch $EXTRA_ACCELERATE_ARGS \
--num_processes $NUM_GPUS \
--mixed_precision 'fp16' \
`pwd`/examples/scripts/sft.py \
--model_name $MODEL_NAME \
--dataset_name $DATASET_NAME \
--output_dir $OUTPUT_DIR \
--max_steps $MAX_STEPS \
--batch_size $BATCH_SIZE \
--seq_length $SEQ_LEN \
$EXTRA_TRAINING_ARGS
"""
echo "Starting program..."
{ # try
echo $CMD
eval "$CMD"
} || { # catch
# save log for exception
echo "Operation Failed!"
exit 1
}
exit 0 | 0 |
hf_public_repos/trl | hf_public_repos/trl/commands/run_dpo.sh | #!/bin/bash
# This script runs a DPO example end-to-end on a tiny model using different possible configurations
# but defaults to QLoRA + PEFT
OUTPUT_DIR="test_dpo/"
MODEL_NAME="HuggingFaceM4/tiny-random-LlamaForCausalLM"
MAX_STEPS=5
BATCH_SIZE=2
SEQ_LEN=128
# Handle extra arguments in case one passes accelerate configs.
EXTRA_ACCELERATE_ARGS=""
EXTRA_TRAINING_ARGS="""--use_peft \
--load_in_4bit
"""
# Set your number of GPUs here
NUM_GPUS=2
if [[ "${TRL_ACCELERATE_CONFIG}" == "" ]]; then
EXTRA_ACCELERATE_ARGS=""
else
EXTRA_ACCELERATE_ARGS="--config_file $TRL_ACCELERATE_CONFIG"
# For DeepSpeed configs we need to set the `--fp16` flag to comply with our configs exposed
# in `examples/accelerate_configs`, as our runners do not support bf16 mixed precision training.
if [[ $TRL_ACCELERATE_CONFIG == *"deepspeed"* ]]; then
EXTRA_TRAINING_ARGS="--fp16"
else
echo "Keeping QLoRA + PEFT"
fi
fi
CMD="""
accelerate launch $EXTRA_ACCELERATE_ARGS \
--num_processes $NUM_GPUS \
--mixed_precision 'fp16' \
`pwd`/examples/scripts/dpo.py \
--model_name_or_path $MODEL_NAME \
--output_dir $OUTPUT_DIR \
--max_steps $MAX_STEPS \
--per_device_train_batch_size $BATCH_SIZE \
--max_length $SEQ_LEN \
$EXTRA_TRAINING_ARGS
"""
echo "Starting program..."
{ # try
echo $CMD
eval "$CMD"
} || { # catch
# save log for exception
echo "Operation Failed!"
exit 1
}
exit 0 | 0 |
hf_public_repos | hf_public_repos/tokenizers/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue&cachedrop">
</a>
<a href="https://pepy.tech/project/tokenizers">
<img src="https://pepy.tech/badge/tokenizers/week" />
</a>
</p>
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
## Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignments tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
## Bindings
We provide bindings to the following languages (more to come!):
- [Rust](https://github.com/huggingface/tokenizers/tree/main/tokenizers) (Original implementation)
- [Python](https://github.com/huggingface/tokenizers/tree/main/bindings/python)
- [Node.js](https://github.com/huggingface/tokenizers/tree/main/bindings/node)
- [Ruby](https://github.com/ankane/tokenizers-ruby) (Contributed by @ankane, external repo)
## Quick example using Python:
Choose your model between Byte-Pair Encoding, WordPiece or Unigram and instantiate a tokenizer:
```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE())
```
You can customize how pre-tokenization (e.g., splitting into words) is done:
```python
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
```
Then training your tokenizer on a set of files just takes two lines of code:
```python
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.train(files=["wiki.train.raw", "wiki.valid.raw", "wiki.test.raw"], trainer=trainer)
```
Once your tokenizer is trained, encode any text with just one line:
```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
```
Check the [documentation](https://huggingface.co/docs/tokenizers/index)
or the [quicktour](https://huggingface.co/docs/tokenizers/quicktour) to learn more!
| 0 |
hf_public_repos | hf_public_repos/tokenizers/CITATION.cff | # This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!
cff-version: 1.2.0
title: HuggingFace's Tokenizers
message: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
type: software
authors:
- given-names: Anthony
family-names: Moi
email: [email protected]
affiliation: HuggingFace
- given-names: Nicolas
family-names: Patry
affiliation: HuggingFace
repository-code: 'https://github.com/huggingface/tokenizers'
url: 'https://github.com/huggingface/tokenizers'
repository: 'https://huggingface.co'
abstract: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
keywords:
- Rust
- Tokenizer
- NLP
license: Apache-2.0
commit: 37372b6
version: 0.13.4
date-released: '2023-04-05'
| 0 |
hf_public_repos | hf_public_repos/tokenizers/RELEASE.md | ## How to release
# Before the release
Simple checklist on how to make releases for `tokenizers`.
- Freeze `master` branch.
- Run all tests (Check CI has properly run)
- If any significant work, check benchmarks:
- `cd tokenizers && cargo bench` (needs to be run on latest release tag to measure difference if it's your first time)
- Run all `transformers` tests. (`transformers` is a big user of `tokenizers`; we need
to make sure we don't break it, and testing is one way to make sure nothing unforeseen
has been done.)
- Run all fast tests at the VERY least (not just the tokenization tests). (`RUN_PIPELINE_TESTS=1 CUDA_VISIBLE_DEVICES=-1 pytest -sv tests/`)
- When all *fast* tests work, then we can also (it's recommended) run the whole `transformers`
test suite.
- Rebase this [PR](https://github.com/huggingface/transformers/pull/16708).
This will create new docker images ready to run the tests suites with `tokenizers` from the main branch.
- Wait for actions to finish
- Rebase this [PR](https://github.com/huggingface/transformers/pull/16712)
This will run the actual full test suite.
- Check the results.
- **If any breaking change has been done**, make sure the version can safely be increased for `transformers` users (the `tokenizers` version needs to make sure users don't upgrade before `transformers` has). [link](https://github.com/huggingface/transformers/blob/main/setup.py#L154)
For instance `tokenizers>=0.10,<0.11` so we can safely upgrade to `0.11` without impacting
current users
- Then start a new PR containing all desired code changes from the following steps.
- You will `Create release` after the code modifications are on `master`.
# Rust
- `tokenizers` (rust, python & node) versions don't have to be in sync but it's
very common to release for all versions at once for new features.
- Edit `Cargo.toml` to reflect new version
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (python PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
the new version on `crates.io`; there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since your package was not uploaded to the repository properly, you can try again.
# Python
- Edit `bindings/python/setup.py` to reflect new version.
- Edit `bindings/python/py_src/tokenizers/__init__.py` to reflect new version.
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (node PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `python-vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
the new version on `pypi`; there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since your package was not uploaded to the repository properly, you can try again.
- This CI/CD has 3 distinct builds, `Pypi`(normal), `conda` and `extra`. `Extra` is REALLY slow (~4h), this is normal since it has to rebuild many things, but enables the wheel to be available for old Linuxes
# Node
- Edit `bindings/node/package.json` to reflect new version.
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (python PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `node-vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
the new version on `npm`; there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since your package was not uploaded to the repository properly, you can try again.
# Testing the CI/CD for release
If you want to make modifications to the CI/CD of the release GH actions, you need
to :
- **Comment out the part that uploads the artifacts** to `crates.io`, `PyPi` or `npm`.
- Change the trigger mechanism so it can trigger every time you push to your branch.
- Keep pushing your changes until the artifacts are properly created.
| 0 |
hf_public_repos | hf_public_repos/tokenizers/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/setup.cfg | [isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
absl
conllu
datasets
elasticsearch
fairseq
faiss-cpu
fastprogress
fire
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
PIL
psutil
pytest
pytorch_lightning
rouge_score
sacrebleu
seqeval
sklearn
streamlit
tensorboardX
tensorflow
tensorflow_datasets
timeout_decorator
torch
torchaudio
torchtext
torchvision
torch_xla
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/rust-toolchain | stable
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/py/tokenizers">
<img alt="Build" src="https://badge.fury.io/py/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
# Tokenizers
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the High-level design, you can go check it there.
Otherwise, let's dive in!
## Main features:
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignments tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
### Installation
#### With pip:
```bash
pip install tokenizers
```
#### From sources:
To use this method, you need to have Rust installed:
```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```
Once Rust is installed, you can compile by running the following:
```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate
# Install `tokenizers` in the current virtual env
pip install -e .
```
### Load a pretrained tokenizer from the Hub
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```
### Using the provided Tokenizers
We provide some pre-built tokenizers to cover the most common cases. You can easily load one of
these using some `vocab.json` and `merges.txt` files:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)
# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```
And you can train them just as simply:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
tokenizer = CharBPETokenizer()
# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])
# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")
# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```
#### Provided Tokenizers
- `CharBPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
All of these can be used and trained as explained above!
### Build your own
Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer,
by putting all the different parts you need together.
You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE
Here is an example showing how to build your own byte-level BPE by putting all the different pieces
together, and then saving it to a single file:
```python
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
tokenizer = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
trainer = trainers.BpeTrainer(
vocab_size=20000,
min_frequency=2,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
)
tokenizer.train([
"./path/to/dataset/1.txt",
"./path/to/dataset/2.txt",
"./path/to/dataset/3.txt"
], trainer=trainer)
# And Save it
tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True)
```
Now, when you want to use this tokenizer, this is as simple as:
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json")
encoded = tokenizer.encode("I can feel the magic, can you?")
```
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/pyproject.toml | [project]
name = 'tokenizers'
requires-python = '>=3.7'
authors = [
{name = 'Nicolas Patry', email = '[email protected]'},
{name = 'Anthony Moi', email = '[email protected]'}
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
keywords = ["NLP", "tokenizer", "BPE", "transformer", "deep learning"]
dynamic = [
'description',
'license',
'readme',
]
dependencies = ["huggingface_hub>=0.16.4,<1.0"]
[project.urls]
Homepage = 'https://github.com/huggingface/tokenizers'
Source = 'https://github.com/huggingface/tokenizers'
[project.optional-dependencies]
testing = ["pytest", "requests", "numpy", "datasets", "black==22.3"]
docs = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
dev = ["tokenizers[testing]"]
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"
[tool.maturin]
python-source = "py_src"
module-name = "tokenizers.tokenizers"
bindings = 'pyo3'
features = ["pyo3/extension-module"]
[tool.black]
line-length = 119
target-version = ['py35']
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/stub.py | import argparse
import inspect
import os
from pathlib import Path
import black
INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"
def do_indent(text: str, indent: str):
return text.replace("\n", f"\n{indent}")
def function(obj, indent, text_signature=None):
if text_signature is None:
text_signature = obj.__text_signature__
string = ""
string += f"{indent}def {obj.__name__}{text_signature}:\n"
indent += INDENT
string += f'{indent}"""\n'
string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
string += f'{indent}"""\n'
string += f"{indent}pass\n"
string += "\n"
string += "\n"
return string
def member_sort(member):
if inspect.isclass(member):
value = 10 + len(inspect.getmro(member))
else:
value = 1
return value
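# Keep only public, documented members that belong in the stub: built-in functions
# and method descriptors with a docstring and a text signature, plus documented
# properties (getset descriptors).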
def fn_predicate(obj):
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
if value:
return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
if inspect.isgetsetdescriptor(obj):
return obj.__doc__ and not obj.__name__.startswith("_")
return False
def get_module_members(module):
members = [
member
for name, member in inspect.getmembers(module)
if not name.startswith("_") and not inspect.ismodule(member)
]
members.sort(key=member_sort)
return members
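# Recursively render a module, class, method or property into `.pyi` stub source.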
def pyi_file(obj, indent=""):
string = ""
if inspect.ismodule(obj):
string += GENERATED_COMMENT
members = get_module_members(obj)
for member in members:
string += pyi_file(member, indent)
elif inspect.isclass(obj):
indent += INDENT
mro = inspect.getmro(obj)
if len(mro) > 2:
inherit = f"({mro[1].__name__})"
else:
inherit = ""
string += f"class {obj.__name__}{inherit}:\n"
body = ""
if obj.__doc__:
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
fns = inspect.getmembers(obj, fn_predicate)
# Init
if obj.__text_signature__:
body += f"{indent}def __init__{obj.__text_signature__}:\n"
body += f"{indent+INDENT}pass\n"
body += "\n"
for (name, fn) in fns:
body += pyi_file(fn, indent=indent)
if not body:
body += f"{indent}pass\n"
string += body
string += "\n\n"
elif inspect.isbuiltin(obj):
string += f"{indent}@staticmethod\n"
string += function(obj, indent)
elif inspect.ismethoddescriptor(obj):
string += function(obj, indent)
elif inspect.isgetsetdescriptor(obj):
# TODO: it would be interesting to add the setter maybe?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")
else:
raise Exception(f"Object {obj} is not supported")
return string
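# Generate the `__init__.py` body that re-exports every public member from the
# compiled extension module (one `Name = origin.Name` line per member).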
def py_file(module, origin):
members = get_module_members(module)
string = GENERATED_COMMENT
string += f"from .. import {origin}\n"
string += "\n"
for member in members:
name = member.__name__
string += f"{name} = {origin}.{name}\n"
return string
def do_black(content, is_pyi):
mode = black.Mode(
target_versions={black.TargetVersion.PY35},
line_length=119,
is_pyi=is_pyi,
string_normalization=True,
experimental_string_processing=False,
)
try:
return black.format_file_contents(content, fast=True, mode=mode)
except black.NothingChanged:
return content
def write(module, directory, origin, check=False):
submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]
filename = os.path.join(directory, "__init__.pyi")
pyi_content = pyi_file(module)
pyi_content = do_black(pyi_content, is_pyi=True)
os.makedirs(directory, exist_ok=True)
if check:
with open(filename, "r") as f:
data = f.read()
assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(pyi_content)
filename = os.path.join(directory, "__init__.py")
py_content = py_file(module, origin)
py_content = do_black(py_content, is_pyi=False)
os.makedirs(directory, exist_ok=True)
is_auto = False
if not os.path.exists(filename):
is_auto = True
else:
with open(filename, "r") as f:
line = f.readline()
if line == GENERATED_COMMENT:
is_auto = True
if is_auto:
if check:
with open(filename, "r") as f:
data = f.read()
assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(py_content)
for name, submodule in submodules:
write(submodule, os.path.join(directory, name), f"{name}", check=check)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
import tokenizers
write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- [#1096] Python 3.11 support
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#956] PyO3 version upgrade
- [#1055] M1 automated builds
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible
Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#962] Fix tests for python 3.10
- [#961] Added link for Ruby port of `tokenizers`
## [0.11.6]
- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.
## [0.11.5]
- [#895] Build `python 3.10` wheels.
## [0.11.4]
- [#884] Fixing bad deserialization following inclusion of a default for Punctuation
## [0.11.3]
- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams
## [0.11.0]
### Fixed
- [#585] Conda version should now work on old CentOS
- [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`.
- [#851] Doc links
### Added
- [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor
- [#845]: Documentation for `Decoders`.
### Changed
- [#850]: Added a feature gate to enable disabling `http` features
- [#718]: Fix `WordLevel` tokenizer determinism during training
- [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer`
- [#770]: Improved documentation for `UnigramTrainer`
- [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub
- [#793]: Saving a pretty JSON file by default when saving a tokenizer
## [0.10.3]
### Fixed
- [#686]: Fix SPM conversion process for whitespace deduplication
- [#707]: Fix stripping strings containing Unicode characters
### Added
- [#693]: Add a CTC Decoder for Wave2Vec models
### Removed
- [#714]: Removed support for Python 3.5
## [0.10.2]
### Fixed
- [#652]: Fix offsets for `Precompiled` corner case
- [#656]: Fix BPE `continuing_subword_prefix`
- [#674]: Fix `Metaspace` serialization problems
## [0.10.1]
### Fixed
- [#616]: Fix SentencePiece tokenizers conversion
- [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM)
- [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut`
- [#620]: Fix serialization/deserialization for overlapping models
- [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`)
## [0.10.0]
### Added
- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
- [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
- [#533]: Add support for conda builds
- [#542]: Add Split pre-tokenizer to easily split using a pattern
- [#544]: Ability to train from memory. This also improves the integration with `datasets`
- [#590]: Add getters/setters for components on BaseTokenizer
- [#574]: Add `fuse_unk` option to SentencePieceBPETokenizer
### Changed
- [#509]: Automatically stubbing the `.pyi` files
- [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()`
- [#530]: The various attributes on each component can be get/set (ie.
`tokenizer.model.dropout = 0.1`)
- [#538]: The API Reference has been improved and is now up-to-date.
### Fixed
- [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that were
forcing to reload the `Model` after a training.
- [#539]: Fix `BaseTokenizer` enable_truncation docstring
## [0.9.4]
### Fixed
- [#492]: Fix `from_file` on `BertWordPieceTokenizer`
- [#498]: Fix the link to download `sentencepiece_model_pb2.py`
- [#500]: Fix a typo in the docs quicktour
### Changed
- [#506]: Improve Encoding mappings for pairs of sequence
## [0.9.3]
### Fixed
- [#470]: Fix hanging error when training with custom component
- [#476]: TemplateProcessing serialization is now deterministic
- [#481]: Fix SentencePieceBPETokenizer.from_files
### Added
- [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts
- [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly
## [0.9.2]
### Fixed
- [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing
## [0.9.1]
### Fixed
- [#459]: Fix a problem with deserialization
## [0.9.0]
### Fixed
- [#362]: Fix training deadlock with Python components.
- [#363]: Fix a crash when calling `.train` with some non-existent files
- [#355]: Remove a lot of possible crashes
- [#389]: Improve truncation (crash and consistency)
### Added
- [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays
- [#292]: Support for the Unigram algorithm
- [#378], [#394], [#416], [#417]: Many new Normalizer and PreTokenizer
- [#403]: Add `TemplateProcessing` `PostProcessor`.
- [#420]: Ability to fuse the "unk" token in BPE.
### Changed
- [#360]: Lots of improvements related to words/alignment tracking
- [#426]: Improvements on error messages thanks to PyO3 0.12
## [0.8.1]
### Fixed
- [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly
### Changed
- [#329]: Improved warning and behavior when we detect a fork
- [#330]: BertNormalizer now keeps the same behavior than the original implementation when
`strip_accents` is not specified.
## [0.8.0]
### Highlights of this release
- We can now encode both pre-tokenized inputs and raw strings. This is especially useful when
processing datasets that are already pre-tokenized like for NER (Named Entity Recognition), and helps
while applying labels to each word.
- Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later
load it back with just one line of code. That's what sharing a Tokenizer means now: 1 line of code.
- With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components,
Encodings, everything can be pickled!
- Training a tokenizer is now even faster (up to 5-10x) than before!
- Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library
makes heavy use of the multithreading capacities of our computers to allows a very fast tokenization,
this led to problems (deadlocks) when used with `multiprocessing`. This version now allows to
disable the parallelism, and will warn you if this is necessary.
- And a lot of other improvements, and fixes.
### Fixed
- [#286]: Fix various crash when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
### Added
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...).
This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`).
- [#273]: `Tokenizer` and its parts are now pickable
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure
activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with
`enable_padding(pad_to_multiple_of=8)` for example.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment
variable. This is especially useful when using `multiprocessing` capabilities, with the `fork`
start method, which happens to be the default on Linux systems. Without disabling the parallelism,
the process dead-locks while encoding. (Cf [#187] for more information)
### Changed
- Improved errors generated during truncation: cases where the provided max length is too low are
now handled properly.
- [#249] `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized,
the argument `is_pretokenized=True` must be specified.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the
processing of each file
- [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original
implementation from GPT-2
- [#309]: Improved the management of the additional vocabulary. This introduces an option
`normalized`, controlling whether a token should be extracted from the normalized version of the
input text.
## [0.7.0]
### Changed
- Only one progress bar while reading files during training. This is better for use-cases with
a high number of files as it avoids having too many progress bars on screen. Also avoids reading the
size of each file before starting to actually read these files, as this process could take really
long.
- [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we
should add the special tokens. This is activated by default.
- [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by
`encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint.
- [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the
normalized one anymore.
- The added token given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using
`train(special_tokens=...)` can now be instances of `AddedToken` to provide more control over these
tokens.
- [#136]: Updated Pyo3 version
- [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using
constructors.
- [#239]: `CharBPETokenizer` now corresponds to OpenAI GPT BPE implementation by default.
### Added
- [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated.
This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these
whitespaces are part of the actual token.
It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`).
- [#236]: `RobertaProcessing` also handles trimming the offsets.
- [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char`
or `word` (input space) and `token` (output space).
- `post_process` can be called on the `Tokenizer`
- [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with
`get_vocab(with_added_tokens: bool)`
- [#136] Models can now be instantiated through object constructors.
### Fixed
- [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE:
- when `add_prefix_space=True`
- [#156]: when a Unicode character gets split-up in multiple byte-level characters
- Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded.
- [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if
not advised, but that's not the question).
- [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer`
### How to migrate
- Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are
using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`).
- `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or
`encode_batch`
- Access to the `original_str` on the `Encoding` has been removed. The original string is the input
of `encode` so it didn't make sense to keep it here.
- No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. They
are now relative to the original string by default.
- Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling
`normalize(sequence)` on the `Tokenizer`
- Change `Model.from_files` and `Model.empty` to use constructor. The model constructor should take
the same arguments as the old methods. (ie `BPE(vocab, merges)` or `BPE()`)
- If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set
`bert_normalizer=False` and `split_on_whitespace_only=True`.
## [0.6.0]
### Changed
- [#165]: Big improvements in speed for BPE (Both training and tokenization)
### Fixed
- [#160]: Some default tokens were missing from `BertWordPieceTokenizer`
- [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got
split up in multiple bytes.
- [#174]: The `longest_first` truncation strategy had a bug
## [0.5.2]
- [#163]: Do not open all files directly while training
### Fixed
- We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file
was named `vocab.json`. This is now fixed.
- The `WordLevel` model was also saving its vocabulary to the wrong format.
## [0.5.1]
### Changed
- `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not
specified, the files get a more generic naming, like `vocab.json` or `merges.txt`.
## [0.5.0]
### Changed
- [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding
- [#149]: `ByteLevelBPETokenizer` now has `dropout`.
- `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers.
(Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`)
- [#139]: Expose `__len__` on `Encoding`
- Improved padding performance.
### Added
- Added a new `Strip` normalizer
### Fixed
- [#145]: Decoding was buggy on `BertWordPieceTokenizer`.
- [#152]: Some documentation and examples were still using the old `BPETokenizer`
### How to migrate
- Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of
`do_lowercase`.
## [0.4.2]
### Fixed
- [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from
being trained.
## [0.4.1]
### Fixed
- [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer
## [0.4.0]
### Changed
- [#131]: Replaced all .new() class methods by a proper __new__ implementation
- Improved typings
### How to migrate
- Remove all `.new` on all class instantiations
## [0.3.0]
### Changed
- BPETokenizer has been renamed to CharBPETokenizer for clarity.
- Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets
truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language
model, just as the main `Encoding`.
- Provide mapping to the original string offsets using:
```
output = tokenizer.encode(...)
print(output.original_str.offsets(output.offsets[3]))
```
- [#99]: Exposed the vocabulary size on all tokenizers
### Added
- Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given
delimiter (Works like `.split(delimiter)`)
- Added `WordLevel`: a new model that simply maps `tokens` to their `ids`.
### Fixed
- Fix a bug with IndexableString
- Fix a bug with truncation
### How to migrate
- Rename `BPETokenizer` to `CharBPETokenizer`
- `Encoding.overflowing` is now a List instead of a `Optional[Encoding]`
## [0.2.1]
### Fixed
- Fix a bug with the IDs associated with added tokens.
- Fix a bug that was causing crashes in Python 3.5
[#1096]: https://github.com/huggingface/tokenizers/pull/1096
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#962]: https://github.com/huggingface/tokenizers/pull/962
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
[#919]: https://github.com/huggingface/tokenizers/pull/919
[#916]: https://github.com/huggingface/tokenizers/pull/916
[#895]: https://github.com/huggingface/tokenizers/pull/895
[#884]: https://github.com/huggingface/tokenizers/pull/884
[#882]: https://github.com/huggingface/tokenizers/pull/882
[#868]: https://github.com/huggingface/tokenizers/pull/868
[#860]: https://github.com/huggingface/tokenizers/pull/860
[#850]: https://github.com/huggingface/tokenizers/pull/850
[#844]: https://github.com/huggingface/tokenizers/pull/844
[#845]: https://github.com/huggingface/tokenizers/pull/845
[#851]: https://github.com/huggingface/tokenizers/pull/851
[#585]: https://github.com/huggingface/tokenizers/pull/585
[#793]: https://github.com/huggingface/tokenizers/pull/793
[#780]: https://github.com/huggingface/tokenizers/pull/780
[#770]: https://github.com/huggingface/tokenizers/pull/770
[#762]: https://github.com/huggingface/tokenizers/pull/762
[#718]: https://github.com/huggingface/tokenizers/pull/718
[#714]: https://github.com/huggingface/tokenizers/pull/714
[#707]: https://github.com/huggingface/tokenizers/pull/707
[#693]: https://github.com/huggingface/tokenizers/pull/693
[#686]: https://github.com/huggingface/tokenizers/pull/686
[#674]: https://github.com/huggingface/tokenizers/pull/674
[#657]: https://github.com/huggingface/tokenizers/pull/657
[#656]: https://github.com/huggingface/tokenizers/pull/656
[#652]: https://github.com/huggingface/tokenizers/pull/652
[#621]: https://github.com/huggingface/tokenizers/pull/621
[#620]: https://github.com/huggingface/tokenizers/pull/620
[#618]: https://github.com/huggingface/tokenizers/pull/618
[#617]: https://github.com/huggingface/tokenizers/pull/617
[#616]: https://github.com/huggingface/tokenizers/pull/616
[#590]: https://github.com/huggingface/tokenizers/pull/590
[#574]: https://github.com/huggingface/tokenizers/pull/574
[#544]: https://github.com/huggingface/tokenizers/pull/544
[#542]: https://github.com/huggingface/tokenizers/pull/542
[#539]: https://github.com/huggingface/tokenizers/pull/539
[#538]: https://github.com/huggingface/tokenizers/pull/538
[#533]: https://github.com/huggingface/tokenizers/pull/533
[#530]: https://github.com/huggingface/tokenizers/pull/530
[#519]: https://github.com/huggingface/tokenizers/pull/519
[#509]: https://github.com/huggingface/tokenizers/pull/509
[#508]: https://github.com/huggingface/tokenizers/pull/508
[#506]: https://github.com/huggingface/tokenizers/pull/506
[#500]: https://github.com/huggingface/tokenizers/pull/500
[#498]: https://github.com/huggingface/tokenizers/pull/498
[#492]: https://github.com/huggingface/tokenizers/pull/492
[#481]: https://github.com/huggingface/tokenizers/pull/481
[#480]: https://github.com/huggingface/tokenizers/pull/480
[#477]: https://github.com/huggingface/tokenizers/pull/477
[#476]: https://github.com/huggingface/tokenizers/pull/476
[#470]: https://github.com/huggingface/tokenizers/pull/470
[#464]: https://github.com/huggingface/tokenizers/pull/464
[#459]: https://github.com/huggingface/tokenizers/pull/459
[#420]: https://github.com/huggingface/tokenizers/pull/420
[#417]: https://github.com/huggingface/tokenizers/pull/417
[#416]: https://github.com/huggingface/tokenizers/pull/416
[#403]: https://github.com/huggingface/tokenizers/pull/403
[#394]: https://github.com/huggingface/tokenizers/pull/394
[#389]: https://github.com/huggingface/tokenizers/pull/389
[#379]: https://github.com/huggingface/tokenizers/pull/379
[#378]: https://github.com/huggingface/tokenizers/pull/378
[#363]: https://github.com/huggingface/tokenizers/pull/363
[#362]: https://github.com/huggingface/tokenizers/pull/362
[#360]: https://github.com/huggingface/tokenizers/pull/360
[#355]: https://github.com/huggingface/tokenizers/pull/355
[#333]: https://github.com/huggingface/tokenizers/pull/333
[#330]: https://github.com/huggingface/tokenizers/pull/330
[#329]: https://github.com/huggingface/tokenizers/pull/329
[#311]: https://github.com/huggingface/tokenizers/pull/311
[#309]: https://github.com/huggingface/tokenizers/pull/309
[#292]: https://github.com/huggingface/tokenizers/pull/292
[#289]: https://github.com/huggingface/tokenizers/pull/289
[#286]: https://github.com/huggingface/tokenizers/pull/286
[#280]: https://github.com/huggingface/tokenizers/pull/280
[#276]: https://github.com/huggingface/tokenizers/pull/276
[#273]: https://github.com/huggingface/tokenizers/pull/273
[#272]: https://github.com/huggingface/tokenizers/pull/272
[#249]: https://github.com/huggingface/tokenizers/pull/249
[#239]: https://github.com/huggingface/tokenizers/pull/239
[#236]: https://github.com/huggingface/tokenizers/pull/236
[#234]: https://github.com/huggingface/tokenizers/pull/234
[#208]: https://github.com/huggingface/tokenizers/pull/208
[#205]: https://github.com/huggingface/tokenizers/issues/205
[#197]: https://github.com/huggingface/tokenizers/pull/197
[#193]: https://github.com/huggingface/tokenizers/pull/193
[#190]: https://github.com/huggingface/tokenizers/pull/190
[#188]: https://github.com/huggingface/tokenizers/pull/188
[#187]: https://github.com/huggingface/tokenizers/issues/187
[#175]: https://github.com/huggingface/tokenizers/issues/175
[#174]: https://github.com/huggingface/tokenizers/issues/174
[#165]: https://github.com/huggingface/tokenizers/pull/165
[#163]: https://github.com/huggingface/tokenizers/issues/163
[#160]: https://github.com/huggingface/tokenizers/issues/160
[#156]: https://github.com/huggingface/tokenizers/pull/156
[#152]: https://github.com/huggingface/tokenizers/issues/152
[#149]: https://github.com/huggingface/tokenizers/issues/149
[#145]: https://github.com/huggingface/tokenizers/issues/145
[#139]: https://github.com/huggingface/tokenizers/issues/139
[#137]: https://github.com/huggingface/tokenizers/issues/137
[#134]: https://github.com/huggingface/tokenizers/issues/134
[#131]: https://github.com/huggingface/tokenizers/issues/131
[#99]: https://github.com/huggingface/tokenizers/pull/99
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/MANIFEST.in | include Cargo.toml
include pyproject.toml
include rust-toolchain
include ../../LICENSE
recursive-include src *
recursive-include tokenizers-lib *
recursive-exclude tokenizers-lib/target *
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/test.txt | <DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Makefile | .PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
check_dirs := examples py_src/tokenizers tests
# Format source code automatically
style:
python stub.py
black --line-length 119 --target-version py35 $(check_dirs)
# Check the source code is formatted correctly
check-style:
python stub.py --check
black --check --line-length 119 --target-version py35 examples py_src/tokenizers tests
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json
# Launch the test suite
test: $(TESTS_RESOURCES)
pip install pytest requests setuptools_rust numpy pyarrow datasets
python -m pytest -s -v tests
cargo test --no-default-features
$(DATA_DIR)/big.txt :
$(dir_guard)
wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
$(dir_guard)
wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Cargo.toml | [package]
name = "tokenizers-python"
version = "0.15.1-dev.0"
authors = ["Anthony MOI <[email protected]>"]
edition = "2021"
[lib]
name = "tokenizers"
crate-type = ["cdylib"]
[dependencies]
rayon = "1.8"
serde = { version = "1.0", features = [ "rc", "derive" ]}
serde_json = "1.0"
libc = "0.2"
env_logger = "0.10.0"
pyo3 = { version = "0.20" }
numpy = "0.20.0"
ndarray = "0.15"
onig = { version = "6.4", default-features = false }
itertools = "0.11"
[dependencies.tokenizers]
version = "0.15.1-dev.0"
path = "../../tokenizers"
[dev-dependencies]
tempfile = "3.8"
pyo3 = { version = "0.20", features = ["auto-initialize"] }
[features]
default = ["pyo3/extension-module"]
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/conftest.py | import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.py | from enum import Enum
from typing import List, Tuple, Union
Offsets = Tuple[int, int]
TextInputSequence = str
"""A :obj:`str` that represents an input sequence """
PreTokenizedInputSequence = Union[List[str], Tuple[str]]
"""A pre-tokenized input sequence. Can be one of:
- A :obj:`List` of :obj:`str`
- A :obj:`Tuple` of :obj:`str`
"""
TextEncodeInput = Union[
TextInputSequence,
Tuple[TextInputSequence, TextInputSequence],
List[TextInputSequence],
]
"""Represents a textual input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.TextInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
"""
PreTokenizedEncodeInput = Union[
PreTokenizedInputSequence,
Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
List[PreTokenizedInputSequence],
]
"""Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
"""
InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
"""Represents all the possible types of input sequences for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextInputSequence`
- When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
"""
EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
"""Represents all the possible types of input for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextEncodeInput`
- When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
"""
class OffsetReferential(Enum):
ORIGINAL = "original"
NORMALIZED = "normalized"
class OffsetType(Enum):
BYTE = "byte"
CHAR = "char"
class SplitDelimiterBehavior(Enum):
REMOVED = "removed"
ISOLATED = "isolated"
MERGED_WITH_PREVIOUS = "merged_with_previous"
MERGED_WITH_NEXT = "merged_with_next"
CONTIGUOUS = "contiguous"
from .tokenizers import (
AddedToken,
Encoding,
NormalizedString,
PreTokenizedString,
Regex,
Token,
Tokenizer,
decoders,
models,
normalizers,
pre_tokenizers,
processors,
trainers,
__version__,
)
from .implementations import (
BertWordPieceTokenizer,
ByteLevelBPETokenizer,
CharBPETokenizer,
SentencePieceBPETokenizer,
SentencePieceUnigramTokenizer,
)
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.pyi | # Generated content DO NOT EDIT
class AddedToken:
"""
Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
It can have special options that defines the way it should behave.
Args:
content (:obj:`str`): The content of the token
single_word (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should only match single words. If :obj:`True`, this
token will never match inside of a word. For example the token ``ing`` would match
on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
The notion of "`inside of a word`" is defined by the word boundaries pattern in
regular expressions (ie. the token should start and end with word boundaries).
lstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its left side.
If :obj:`True`, this token will greedily match any whitespace on its left. For
example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
rstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its right
side. If :obj:`True`, this token will greedily match any whitespace on its right.
It works just like :obj:`lstrip` but on the right.
normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should match against the normalized version of the input
text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
lowercasing the text, the token could be extracted from the input ``"I saw a lion
Yesterday"``.
special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should be skipped when decoding.
"""
def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
pass
@property
def content(self):
"""
Get the content of this :obj:`AddedToken`
"""
pass
@property
def lstrip(self):
"""
Get the value of the :obj:`lstrip` option
"""
pass
@property
def normalized(self):
"""
Get the value of the :obj:`normalized` option
"""
pass
@property
def rstrip(self):
"""
Get the value of the :obj:`rstrip` option
"""
pass
@property
def single_word(self):
"""
Get the value of the :obj:`single_word` option
"""
pass
@property
def special(self):
"""
Get the value of the :obj:`special` option
"""
pass
class Encoding:
"""
The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
"""
@property
def attention_mask(self):
"""
The attention mask
This indicates to the LM which tokens should be attended to, and which should not.
This is especially important when batching sequences, where we need to apply
padding.
Returns:
:obj:`List[int]`: The attention mask
"""
pass
def char_to_token(self, char_pos, sequence_index=0):
"""
Get the token that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the token that contains this char in the encoded sequence
"""
pass
def char_to_word(self, char_pos, sequence_index=0):
"""
Get the word that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the word that contains this char in the input sequence
"""
pass
@property
def ids(self):
"""
The generated IDs
The IDs are the main input to a Language Model. They are the token indices,
the numerical representations that a LM understands.
Returns:
:obj:`List[int]`: The list of IDs
"""
pass
@staticmethod
def merge(encodings, growing_offsets=True):
"""
Merge the list of encodings into one final :class:`~tokenizers.Encoding`
Args:
encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
The list of encodings that should be merged in one
growing_offsets (:obj:`bool`, defaults to :obj:`True`):
Whether the offsets should accumulate while merging
Returns:
:class:`~tokenizers.Encoding`: The resulting Encoding
"""
pass
@property
def n_sequences(self):
"""
The number of sequences represented
Returns:
:obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
"""
pass
@property
def offsets(self):
"""
The offsets associated to each token
These offsets let you slice the input string, and thus retrieve the original
part that led to producing the corresponding token.
Returns:
A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
"""
pass
@property
def overflowing(self):
"""
A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
the output into as many pieces as required to match the specified maximum length.
This field lets you retrieve all the subsequent pieces.
When you use pairs of sequences, the overflowing pieces will contain enough
variations to cover all the possible combinations, while respecting the provided
maximum length.
"""
pass
def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
"""
Pad the :class:`~tokenizers.Encoding` at the given length
Args:
length (:obj:`int`):
The desired length
direction: (:obj:`str`, defaults to :obj:`right`):
The expected padding direction. Can be either :obj:`right` or :obj:`left`
pad_id (:obj:`int`, defaults to :obj:`0`):
The ID corresponding to the padding token
pad_type_id (:obj:`int`, defaults to :obj:`0`):
The type ID corresponding to the padding token
pad_token (:obj:`str`, defaults to `[PAD]`):
The pad token to use
"""
pass
@property
def sequence_ids(self):
"""
The generated sequence indices.
They represent the index of the input sequence associated to each token.
The sequence id can be None if the token is not related to any input sequence,
like for example with special tokens.
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
"""
pass
def set_sequence_id(self, sequence_id):
"""
Set the given sequence index
Set the given sequence index for the whole range of tokens contained in this
:class:`~tokenizers.Encoding`.
"""
pass
@property
def special_tokens_mask(self):
"""
The special token mask
This indicates which tokens are special tokens, and which are not.
Returns:
:obj:`List[int]`: The special tokens mask
"""
pass
def token_to_chars(self, token_index):
"""
Get the offsets of the token at the given index.
The returned offsets are related to the input sequence that contains the
token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
"""
pass
def token_to_sequence(self, token_index):
"""
Get the index of the sequence represented by the given token.
In the general use case, this method returns :obj:`0` for a single sequence or
the first sequence of a pair, and :obj:`1` for the second sequence of a pair
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The sequence id of the given token
"""
pass
def token_to_word(self, token_index):
"""
Get the index of the word that contains the token in one of the input sequences.
The returned word index is related to the input sequence that contains
the token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The index of the word in the relevant input sequence.
"""
pass
@property
def tokens(self):
"""
The generated tokens
They are the string representation of the IDs.
Returns:
:obj:`List[str]`: The list of tokens
"""
pass
def truncate(self, max_length, stride=0, direction="right"):
"""
Truncate the :class:`~tokenizers.Encoding` at the given length
If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
this information is lost. It will be considered as representing a single sequence.
Args:
max_length (:obj:`int`):
The desired length
stride (:obj:`int`, defaults to :obj:`0`):
The length of previous content to be included in each overflowing piece
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
"""
pass
@property
def type_ids(self):
"""
The generated type IDs
Generally used for tasks like sequence classification or question answering,
these tokens let the LM know which input sequence corresponds to each token.
Returns:
:obj:`List[int]`: The list of type ids
"""
pass
@property
def word_ids(self):
"""
The generated word indices.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the word indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
def word_to_chars(self, word_index, sequence_index=0):
"""
Get the offsets of the word at the given index in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
"""
pass
def word_to_tokens(self, word_index, sequence_index=0):
"""
Get the encoded tokens corresponding to the word at the given index
in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
"""
pass
@property
def words(self):
"""
The generated word indices.
.. warning::
This is deprecated and will be removed in a future version.
Please use :obj:`~tokenizers.Encoding.word_ids` instead.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the word indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
class NormalizedString:
"""
NormalizedString
A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
While making all the requested modifications, it keeps track of the alignment information
between the two versions of the string.
Args:
sequence: str:
The string sequence used to initialize this NormalizedString
"""
def append(self, s):
"""
Append the given sequence to the string
"""
pass
def clear(self):
"""
Clears the string
"""
pass
def filter(self, func):
"""
Filter each character of the string using the given func
"""
pass
def for_each(self, func):
"""
Calls the given function for each character of the string
"""
pass
def lowercase(self):
"""
Lowercase the string
"""
pass
def lstrip(self):
"""
Strip the left of the string
"""
pass
def map(self, func):
"""
Calls the given function for each character of the string
Replaces each character of the string using the returned value. Each
returned value **must** be a str of length 1 (ie a character).
"""
pass
def nfc(self):
"""
Runs the NFC normalization
"""
pass
def nfd(self):
"""
Runs the NFD normalization
"""
pass
def nfkc(self):
"""
Runs the NFKC normalization
"""
pass
def nfkd(self):
"""
Runs the NFKD normalization
"""
pass
@property
def normalized(self):
"""
The normalized part of the string
"""
pass
def prepend(self, s):
"""
Prepend the given sequence to the string
"""
pass
def replace(self, pattern, content):
"""
Replace the content of the given pattern with the provided content
Args:
pattern: Pattern:
A pattern used to match the string. Usually a string or a Regex
content: str:
The content to be used as replacement
"""
pass
def rstrip(self):
"""
Strip the right of the string
"""
pass
def slice(self, range):
"""
Slice the string using the given range
"""
pass
def split(self, pattern, behavior):
"""
Split the NormalizedString using the given pattern and the specified behavior
Args:
pattern: Pattern:
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
behavior: SplitDelimiterBehavior:
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
Returns:
A list of NormalizedString, representing each split
"""
pass
def strip(self):
"""
Strip both ends of the string
"""
pass
def uppercase(self):
"""
Uppercase the string
"""
pass
class PreTokenizedString:
"""
PreTokenizedString
Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
underlying string, while keeping track of the alignment information (offsets).
The PreTokenizedString manages what we call `splits`. Each split represents a substring
which is a subpart of the original string, with the relevant offsets and tokens.
When calling one of the methods used to modify the PreTokenizedString (namely one of
`split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
tokens will get modified.
Args:
sequence: str:
The string sequence used to initialize this PreTokenizedString
"""
def __init__(self, sequence):
pass
def get_splits(self, offset_referential="original", offset_type="char"):
"""
Get the splits currently managed by the PreTokenizedString
Args:
offset_referential: :obj:`str`
Whether the returned splits should have offsets expressed relative
to the original string, or the normalized one. choices: "original", "normalized".
offset_type: :obj:`str`
Whether the returned splits should have offsets expressed in bytes or chars.
When slicing an str, we usually want to use chars, which is the default value.
Now in some cases it might be interesting to get these offsets expressed in bytes,
so it is possible to change this here.
choices: "char", "bytes"
Returns
A list of splits
"""
pass
def normalize(self, func):
"""
Normalize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[NormalizedString], None]:
The function used to normalize each underlying split. This function
does not need to return anything, just calling the methods on the provided
NormalizedString allow its modification.
"""
pass
def split(self, func):
"""
Split the PreTokenizedString using the given `func`
Args:
func: Callable[[index, NormalizedString], List[NormalizedString]]:
The function used to split each underlying split.
It is expected to return a list of `NormalizedString`, that represent the new
splits. If the given `NormalizedString` does not need any splitting, we can
just return it directly.
In order for the offsets to be tracked accurately, any returned `NormalizedString`
should come from calling either `.split` or `.slice` on the received one.
"""
pass
def to_encoding(self, type_id=0, word_idx=None):
"""
Return an Encoding generated from this PreTokenizedString
Args:
type_id: int = 0:
The type_id to be used on the generated Encoding.
word_idx: Optional[int] = None:
An optional word index to be used for each token of this Encoding. If provided,
all the word indices in the generated Encoding will use this value, instead
of the one automatically tracked during pre-tokenization.
Returns:
An Encoding
"""
pass
def tokenize(self, func):
"""
Tokenize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[str], List[Token]]:
The function used to tokenize each underlying split. This function must return
a list of Token generated from the input str.
"""
pass
class Regex:
"""
Instantiate a new Regex with the given pattern
"""
def __init__(self, pattern):
pass
class Token:
pass
class Tokenizer:
"""
A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
and outputs an :class:`~tokenizers.Encoding`.
Args:
model (:class:`~tokenizers.models.Model`):
The core algorithm that this :obj:`Tokenizer` should be using.
"""
def __init__(self, model):
pass
def add_special_tokens(self, tokens):
"""
Add the given special tokens to the Tokenizer.
If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
them. If they don't exist, the Tokenizer creates them, giving them a new id.
These special tokens will never be processed by the model (ie won't be split into
multiple tokens), and they can be removed from the output when decoding.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of special tokens we want to add to the vocabulary. Each token can either
be a string or an instance of :class:`~tokenizers.AddedToken` for more
customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def add_tokens(self, tokens):
"""
Add the given tokens to the vocabulary
The given tokens are added only if they don't already exist in the vocabulary.
Each token then gets a newly attributed id.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of tokens we want to add to the vocabulary. Each token can be either a
string or an instance of :class:`~tokenizers.AddedToken` for more customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def decode(self, ids, skip_special_tokens=True):
"""
Decode the given list of ids back to a string
This is used to decode anything coming back from a Language Model
Args:
ids (A :obj:`List/Tuple` of :obj:`int`):
The list of ids that we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded string
Returns:
:obj:`str`: The decoded string
"""
pass
def decode_batch(self, sequences, skip_special_tokens=True):
"""
Decode a batch of ids back to their corresponding string
Args:
sequences (:obj:`List` of :obj:`List[int]`):
The batch of sequences we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded strings
Returns:
:obj:`List[str]`: A list of decoded strings
"""
pass
@property
def decoder(self):
"""
The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
"""
pass
def enable_padding(
self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
):
"""
Enable the padding
Args:
direction (:obj:`str`, `optional`, defaults to :obj:`right`):
The direction in which to pad. Can be either ``right`` or ``left``
pad_to_multiple_of (:obj:`int`, `optional`):
If specified, the padding length should always snap to the next multiple of the
given value. For example, if we were going to pad with a length of 250 but
``pad_to_multiple_of=8`` then we will pad to 256.
pad_id (:obj:`int`, defaults to 0):
The id to be used when padding
pad_type_id (:obj:`int`, defaults to 0):
The type id to be used when padding
pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
The pad token to be used when padding
length (:obj:`int`, `optional`):
If specified, the length at which to pad. If not specified we pad using the size of
the longest sequence in a batch.
"""
pass
def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
"""
Enable truncation
Args:
max_length (:obj:`int`):
The max length at which to truncate
stride (:obj:`int`, `optional`):
The length of the previous first sequence to be included in the overflowing
sequence
strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
The strategy to use for truncation. Can be one of ``longest_first``, ``only_first`` or
``only_second``.
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
"""
pass
def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given sequence and pair. This method can process raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
encode("A single sequence")`
encode("A sequence", "And its pair")`
encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
encode(
[ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
is_pretokenized=True
)
Args:
sequence (:obj:`~tokenizers.InputSequence`):
The main input sequence we want to encode. This sequence can be either raw
text or pre-tokenized, according to the ``is_pretokenized`` argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
pair (:obj:`~tokenizers.InputSequence`, `optional`):
An optional input sequence. The expected format is the same as for ``sequence``.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The encoded result
"""
pass
def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given batch of inputs. This method accept both raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
encode_batch([
"A single sequence",
("A tuple with a sequence", "And its pair"),
[ "A", "pre", "tokenized", "sequence" ],
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
])
Args:
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
A list of single sequences or pair sequences to encode. Each sequence
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
"""
pass
@staticmethod
def from_buffer(buffer):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
Args:
buffer (:obj:`bytes`):
A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_file(path):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
Args:
path (:obj:`str`):
A path to a local JSON file representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_pretrained(identifier, revision="main", auth_token=None):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
Hugging Face Hub.
Args:
identifier (:obj:`str`):
The identifier of a Model on the Hugging Face Hub, that contains
a tokenizer.json file
revision (:obj:`str`, defaults to `main`):
A branch or commit id
auth_token (:obj:`str`, `optional`, defaults to `None`):
An optional auth token used to access private repositories on the
Hugging Face Hub
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_str(json):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
Args:
json (:obj:`str`):
A valid JSON string representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
def get_added_tokens_decoder(self):
"""
Get the underlying vocabulary
Returns:
:obj:`Dict[int, AddedToken]`: The vocabulary
"""
pass
def get_vocab(self, with_added_tokens=True):
"""
Get the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`Dict[str, int]`: The vocabulary
"""
pass
def get_vocab_size(self, with_added_tokens=True):
"""
Get the size of the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`int`: The size of the vocabulary
"""
pass
def id_to_token(self, id):
"""
Convert the given id to its corresponding token if it exists
Args:
id (:obj:`int`):
The id to convert
Returns:
:obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
"""
pass
@property
def model(self):
"""
The :class:`~tokenizers.models.Model` in use by the Tokenizer
"""
pass
def no_padding(self):
"""
Disable padding
"""
pass
def no_truncation(self):
"""
Disable truncation
"""
pass
@property
def normalizer(self):
"""
The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
"""
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a single sentence or a pair
Returns:
:obj:`int`: The number of special tokens that would be added
"""
pass
@property
def padding(self):
"""
Get the current padding parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current padding parameters if padding is enabled
"""
pass
def post_process(self, encoding, pair=None, add_special_tokens=True):
"""
Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to the set truncation params (provided with
:meth:`~tokenizers.Tokenizer.enable_truncation`)
2. Apply the :class:`~tokenizers.processors.PostProcessor`
3. Pad according to the set padding params (provided with
:meth:`~tokenizers.Tokenizer.enable_padding`)
Args:
encoding (:class:`~tokenizers.Encoding`):
The :class:`~tokenizers.Encoding` corresponding to the main sequence.
pair (:class:`~tokenizers.Encoding`, `optional`):
An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The final post-processed encoding
"""
pass
@property
def post_processor(self):
"""
The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
"""
pass
@property
def pre_tokenizer(self):
"""
The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
"""
pass
def save(self, path, pretty=True):
"""
Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
Args:
path (:obj:`str`):
A path to a file in which to save the serialized tokenizer.
pretty (:obj:`bool`, defaults to :obj:`True`):
Whether the JSON file should be pretty formatted.
"""
pass
def to_str(self, pretty=False):
"""
Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
Args:
pretty (:obj:`bool`, defaults to :obj:`False`):
Whether the JSON string should be pretty formatted.
Returns:
:obj:`str`: A string representing the serialized Tokenizer
"""
pass
def token_to_id(self, token):
"""
Convert the given token to its corresponding id if it exists
Args:
token (:obj:`str`):
The token to convert
Returns:
:obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
"""
pass
def train(self, files, trainer=None):
"""
Train the Tokenizer using the given files.
Reads the files line by line, while keeping all the whitespace, even new lines.
If you want to train from data stored in memory, you can check
:meth:`~tokenizers.Tokenizer.train_from_iterator`
Args:
files (:obj:`List[str]`):
A list of path to the files that we should use for training
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
"""
pass
def train_from_iterator(self, iterator, trainer=None, length=None):
"""
Train the Tokenizer using the provided iterator.
You can provide anything that is a Python Iterator
* A list of sequences :obj:`List[str]`
* A generator that yields :obj:`str` or :obj:`List[str]`
* A Numpy array of strings
* ...
Args:
iterator (:obj:`Iterator`):
Any iterator over strings or list of strings
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
pass
@property
def truncation(self):
"""
Get the currently set truncation parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current truncation parameters if truncation is enabled
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py | from .visualizer import Annotation, EncodingVisualizer
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css | .tokenized-text {
width:100%;
padding:2rem;
max-height: 400px;
overflow-y: auto;
box-sizing:border-box;
line-height:4rem; /* Lots of space between lines */
font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
background-color: rgba(0,0,0,0.01);
letter-spacing:2px; /* Give some extra separation between chars */
}
.non-token{
/* White space and other things the tokenizer ignores*/
white-space: pre;
letter-spacing:4px;
border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious */
border-bottom:1px solid #A0A0A0;
line-height: 1rem;
height: calc(100% - 2px);
}
.token {
white-space: pre;
position:relative;
color:black;
letter-spacing:2px;
}
.annotation{
white-space:nowrap; /* Important - ensures that annotations appear even if the annotated text wraps a line */
border-radius:4px;
position:relative;
width:fit-content;
}
.annotation:before {
/*The before holds the text and the after holds the background*/
z-index:1000; /* Make sure this is above the background */
content:attr(data-label); /* The annotations label is on a data attribute */
color:white;
position:absolute;
font-size:1rem;
text-align:center;
font-weight:bold;
top:1.75rem;
line-height:0;
left:0;
width:100%;
padding:0.5rem 0;
/* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
overflow: hidden;
white-space: nowrap;
text-overflow:ellipsis;
}
.annotation:after {
content:attr(data-label); /* The content defines the width of the annotation*/
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
padding:0.5rem 0;
/* Nasty hack below:
We set the annotations color in code because we don't know the colors at css time.
But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
So to get around that, annotations have the color set on them with a style attribute and then we
can get the color with currentColor.
Annotations wrap tokens and tokens set the color back to black
*/
background-color: currentColor;
}
.annotation:hover::after, .annotation:hover::before{
/* When the user hovers over an annotation expand the label to display in full
*/
min-width: fit-content;
}
.annotation:hover{
/* Emphasize the annotation start end with a border on hover*/
border-color: currentColor;
border: 2px solid;
}
.special-token:not(:empty){
/*
A non-empty special token is like UNK (as opposed to CLS, which has no representation in the text)
*/
position:relative;
}
.special-token:empty::before{
/* Special tokens that don't have text are displayed as pseudo elements so we don't select them with the mouse */
content:attr(data-stok);
background:#202020;
font-size:0.75rem;
color:white;
margin: 0 0.25rem;
padding: 0.25rem;
border-radius:4px
}
.special-token:not(:empty):before {
/* Special tokens that have text (UNK) are displayed above the actual text*/
content:attr(data-stok);
position:absolute;
bottom:1.75rem;
min-width:100%;
width:100%;
height:1rem;
line-height:1rem;
font-size:1rem;
text-align:center;
color:white;
font-weight:bold;
background:#202020;
border-radius:10%;
}
/*
We want to alternate the color of tokens, but we can't use nth-child because tokens might be broken up by annotations;
instead we apply even and odd classes at generation time and color them that way
*/
.even-token{
background:#DCDCDC ;
border: 1px solid #DCDCDC;
}
.odd-token{
background:#A0A0A0;
border: 1px solid #A0A0A0;
}
.even-token.multi-token,.odd-token.multi-token{
background: repeating-linear-gradient(
45deg,
transparent,
transparent 1px,
#ccc 1px,
#ccc 1px
),
/* on "bottom" */
linear-gradient(
to bottom,
#FFB6C1,
#999
);
}
.multi-token:hover::after {
content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
color:white;
background-color: black;
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
padding:0.5rem 0;
}
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py | import itertools
import os
import re
from string import Template
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
from tokenizers import Encoding, Tokenizer
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
css = f.read()
class Annotation:
start: int
end: int
label: str
def __init__(self, start: int, end: int, label: str):
self.start = start
self.end = end
self.label = label
AnnotationList = List[Annotation]
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
token_ix: Optional[int]
anno_ix: Optional[int]
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
pass
class EncodingVisualizer:
"""
Build an EncodingVisualizer
Args:
tokenizer (:class:`~tokenizers.Tokenizer`):
A tokenizer instance
default_to_notebook (:obj:`bool`):
Whether to render html output in a notebook by default
annotation_converter (:obj:`Callable`, `optional`):
An optional (lambda) function that takes an annotation in any format and returns
an Annotation object
"""
# Raw string so that \b is a regex word boundary rather than a backspace character
unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
def __init__(
self,
tokenizer: Tokenizer,
default_to_notebook: bool = True,
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
):
if default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?
You can also pass `default_to_notebook=False` to get back raw HTML
"""
)
self.tokenizer = tokenizer
self.default_to_notebook = default_to_notebook
self.annotation_converter = annotation_converter
pass
def __call__(
self,
text: str,
annotations: AnnotationList = [],
default_to_notebook: Optional[bool] = None,
) -> Optional[str]:
"""
Build a visualization of the given text
Args:
text (:obj:`str`):
The text to tokenize
annotations (:obj:`List[Annotation]`, `optional`):
An optional list of annotations of the text. They can either be an annotation class
or anything else if you instantiated the visualizer with a converter function
default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
If True, will render the html in a notebook. Otherwise returns an html string.
Returns:
The HTML string if default_to_notebook is False, otherwise (default) returns None and
renders the HTML in the notebook
"""
final_default_to_notebook = self.default_to_notebook
if default_to_notebook is not None:
final_default_to_notebook = default_to_notebook
if final_default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?"""
)
if self.annotation_converter is not None:
annotations = list(map(self.annotation_converter, annotations))
encoding = self.tokenizer.encode(text)
html = EncodingVisualizer.__make_html(text, encoding, annotations)
if final_default_to_notebook:
display(HTML(html))
else:
return html
@staticmethod
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
"""
Generates a color palette for all the labels in a given set of annotations
Args:
annotations (:obj:`Annotation`):
A list of annotations
Returns:
:obj:`dict`: A dictionary mapping labels to colors in HSL format
"""
if len(annotations) == 0:
return {}
labels = set(map(lambda x: x.label, annotations))
num_labels = len(labels)
h_step = int(255 / num_labels)
if h_step < 20:
h_step = 20
s = 32
l = 64
h = 10
colors = {}
for label in sorted(labels): # sort so we always get the same colors for a given set of labels
colors[label] = f"hsl({h},{s}%,{l}%"
h += h_step
return colors
@staticmethod
def consecutive_chars_to_html(
consecutive_chars_list: List[CharState],
text: str,
encoding: Encoding,
):
"""
Converts a list of "consecutive chars" into a single HTML element.
Chars are consecutive if they fall under the same word, token and annotation.
The CharState class has a "partition_key" method that makes it easy to
compare whether two chars are consecutive.
Args:
consecutive_chars_list (:obj:`List[CharState]`):
A list of CharStates that have been grouped together
text (:obj:`str`):
The original text being processed
encoding (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`str`: The HTML span for a set of consecutive chars
"""
first = consecutive_chars_list[0]
if first.char_ix is None:
# it's a special token
stoken = encoding.tokens[first.token_ix]
# special tokens are represented as empty spans. We use the data attribute and css
# magic to display it
return f'<span class="special-token" data-stoken={stoken}></span>'
# We're not in a special token so this group has a start and end.
last = consecutive_chars_list[-1]
start = first.char_ix
end = last.char_ix + 1
span_text = text[start:end]
css_classes = [] # What css classes will we apply on the resulting span
data_items = {} # What data attributes will we apply on the result span
if first.token_ix is not None:
# We can either be in a token or not (e.g. in white space)
css_classes.append("token")
if first.is_multitoken:
css_classes.append("multi-token")
if first.token_ix % 2:
# We use this to color alternating tokens.
# A token might be split by an annotation that ends in the middle of it, so this
# lets us visually indicate a consecutive token despite its possible splitting in
# the html markup
css_classes.append("odd-token")
else:
# Like above, but a different color so we can see the tokens alternate
css_classes.append("even-token")
if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
# This is a special token that is in the text. probably UNK
css_classes.append("special-token")
# TODO is this the right name for the data attribute ?
data_items["stok"] = encoding.tokens[first.token_ix]
else:
# In this case we are looking at a group/single char that is not tokenized.
# e.g. white space
css_classes.append("non-token")
css = f'''class="{' '.join(css_classes)}"'''
data = ""
for key, val in data_items.items():
data += f' data-{key}="{val}"'
return f"<span {css} {data} >{span_text}</span>"
@staticmethod
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
current_consecutive_chars = [char_states[0]]
prev_anno_ix = char_states[0].anno_ix
spans = []
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
cur_anno_ix = char_states[0].anno_ix
if cur_anno_ix is not None:
# If we started in an annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
for cs in char_states[1:]:
cur_anno_ix = cs.anno_ix
if cur_anno_ix != prev_anno_ix:
# If we've transitioned in or out of an annotation
spans.append(
# Create a span from the current consecutive characters
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
current_consecutive_chars = [cs]
if prev_anno_ix is not None:
# if we transitioned out of an annotation, close its span
spans.append("</span>")
if cur_anno_ix is not None:
# If we entered a new annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
prev_anno_ix = cur_anno_ix
if cs.partition_key() == current_consecutive_chars[0].partition_key():
# If the current character is in the same "group" as the previous one
current_consecutive_chars.append(cs)
else:
# Otherwise we make a span for the previous group
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
# And reset the consecutive_char_list to form a new group
current_consecutive_chars = [cs]
# All that's left is to fill out the final span
# TODO I think there is an edge case here where an annotation's span might not close
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
res = HTMLBody(spans) # Send the list of spans to the body of our html
return res
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
"""
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`AnnotationList`):
A (possibly empty) list of annotations
Returns:
A list of length len(text) whose entry at index i is None if there is no annotation on
character i, or k, the index of the annotation that covers index i (where k is an index
into the list of annotations)
"""
annotation_map = [None] * len(text)
for anno_ix, a in enumerate(annotations):
for i in range(a.start, a.end):
annotation_map[i] = anno_ix
return annotation_map
@staticmethod
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
"""
For each character in the original text, we emit a tuple representing its "state":
* which token_ix it corresponds to
* which word_ix it corresponds to
* which annotation_ix it corresponds to
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`List[Annotation]`):
A (possibly empty) list of annotations
encoding: (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
its state is
"""
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
# Todo make this a dataclass or named tuple
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
for token_ix, token in enumerate(encoding.tokens):
offsets = encoding.token_to_chars(token_ix)
if offsets is not None:
start, end = offsets
for i in range(start, end):
char_states[i].tokens.append(token_ix)
for char_ix, anno_ix in enumerate(annotation_map):
char_states[char_ix].anno_ix = anno_ix
return char_states
def HTMLBody(children: List[str], css_styles=css) -> str:
"""
Generates the full html with css from a list of html spans
Args:
children (:obj:`List[str]`):
A list of strings, assumed to be html elements
css_styles (:obj:`str`, `optional`):
Optional alternative implementation of the css
Returns:
:obj:`str`: An HTML string with style markup
"""
children_text = "".join(children)
return f"""
<html>
<head>
<style>
{css_styles}
</style>
</head>
<body>
<div class="tokenized-text" dir=auto>
{children_text}
</div>
</body>
</html>
"""
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py | # Generated content DO NOT EDIT
from .. import processors
PostProcessor = processors.PostProcessor
BertProcessing = processors.BertProcessing
ByteLevel = processors.ByteLevel
RobertaProcessing = processors.RobertaProcessing
Sequence = processors.Sequence
TemplateProcessing = processors.TemplateProcessing
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi | # Generated content DO NOT EDIT
class PostProcessor:
"""
Base class for all post-processors
This class is not supposed to be instantiated directly. Instead, any implementation of
a PostProcessor will return an instance of this class when instantiated.
"""
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class BertProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Bert model:
- a SEP token
- a CLS token
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
"""
def __init__(self, sep, cls):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
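# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# BertProcessing takes the (token, id) pairs for SEP and CLS, in that order.
# The ids below are the usual bert-base-uncased values and are assumptions
# here; look them up in your own vocabulary, e.g. tokenizer.token_to_id("[SEP]").
from tokenizers.processors import BertProcessing

bert_post = BertProcessing(("[SEP]", 102), ("[CLS]", 101))  # (sep, cls)
# Attach with: tokenizer.post_processor = bert_post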
class ByteLevel(PostProcessor):
"""
This post-processor takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor must be used.
Args:
trim_offsets (:obj:`bool`):
Whether to trim the whitespaces from the produced offsets.
"""
def __init__(self, trim_offsets=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class RobertaProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Roberta model:
- a SEP token
- a CLS token
It also takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor should be initialized
with :obj:`trim_offsets=True`
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to trim the whitespaces from the produced offsets.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether the add_prefix_space option was enabled during pre-tokenization. This
is relevant because it defines the way the offsets are trimmed out.
"""
def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
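# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# RoBERTa-style post-processing; "<s>"/"</s>" with ids 0 and 2 match
# roberta-base and are assumptions here. trim_offsets defaults to True.
from tokenizers.processors import RobertaProcessing

roberta_post = RobertaProcessing(("</s>", 2), ("<s>", 0))  # (sep, cls)
# Attach with: tokenizer.post_processor = roberta_post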
class Sequence(PostProcessor):
"""
Sequence Processor
Args:
processors (:obj:`List[PostProcessor]`)
The processors that need to be chained
"""
def __init__(self, processors):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
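# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# Chains post-processors: ByteLevel offset trimming followed by a template that
# appends an EOS token. The "</s>" id of 2 is an assumption.
from tokenizers.processors import ByteLevel, Sequence, TemplateProcessing

post = Sequence(
    [
        ByteLevel(trim_offsets=True),
        TemplateProcessing(single="$A </s>", pair="$A </s> $B:1 </s>:1", special_tokens=[("</s>", 2)]),
    ]
)
# Attach with: tokenizer.post_processor = post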
class TemplateProcessing(PostProcessor):
"""
Provides a way to specify templates in order to add the special tokens to each
input sequence as relevant.
Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first
sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
sequences. The final result looks like this:
- Single sequence: :obj:`[CLS] Hello there [SEP]`
- Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
With the type ids as follows::
[CLS] ... [SEP] ... [SEP]
0 0 0 1 1
You can achieve such behavior using a TemplateProcessing::
TemplateProcessing(
single="[CLS] $0 [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
In this example, each input sequence is identified using a ``$`` construct. This identifier
lets us specify each input sequence, and the type_id to use. When nothing is specified,
it uses the default values. Here are the different ways to specify it:
- Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
- Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
- Specifying both: ``$A:0``, ``$B:1``, ...
The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
**Warning**: You must ensure that you are giving the correct tokens/ids as these
will be added to the Encoding without any further check. If the given ids correspond
to something totally different in a `Tokenizer` using this `PostProcessor`, it
might lead to unexpected results.
Args:
single (:obj:`Template`):
The template used for single sequences
pair (:obj:`Template`):
The template used when both sequences are specified
special_tokens (:obj:`Tokens`):
The list of special tokens used in each sequences
Types:
Template (:obj:`str` or :obj:`List`):
- If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
- If a :obj:`List[str]` is provided, a list of tokens
Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
- A :obj:`Tuple` with both a token and its associated ID, in any order
- A :obj:`dict` with the following keys:
- "id": :obj:`str` => The special token id, as specified in the Template
- "ids": :obj:`List[int]` => The associated IDs
- "tokens": :obj:`List[str]` => The associated tokens
The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
the same length.
"""
def __init__(self, single, pair, special_tokens):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
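# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# The BERT-style template described above. The ids 101/102 are the usual
# bert-base-uncased values and are assumptions; take them from your own vocab.
from tokenizers.processors import TemplateProcessing

bert_template = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
)
# After `tokenizer.post_processor = bert_template`, single inputs become
# "[CLS] ... [SEP]" and pairs "[CLS] ... [SEP] ... [SEP]" with type ids 0/1.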
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py | from .. import normalizers
Normalizer = normalizers.Normalizer
BertNormalizer = normalizers.BertNormalizer
NFD = normalizers.NFD
NFKD = normalizers.NFKD
NFC = normalizers.NFC
NFKC = normalizers.NFKC
Sequence = normalizers.Sequence
Lowercase = normalizers.Lowercase
Prepend = normalizers.Prepend
Strip = normalizers.Strip
StripAccents = normalizers.StripAccents
Nmt = normalizers.Nmt
Precompiled = normalizers.Precompiled
Replace = normalizers.Replace
NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
if normalizer not in NORMALIZERS:
raise ValueError(
"{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
)
return NORMALIZERS[normalizer]()
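# --- Illustrative sketch (editor addition) ---
# Resolves a lowercase name to the matching unicode normalizer; unknown names
# raise ValueError.
nfkc = unicode_normalizer_from_str("nfkc")
nfkc.normalize_str("ℌello ﬁne")  # -> "Hello fine": compatibility forms are folded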
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi | # Generated content DO NOT EDIT
class Normalizer:
"""
Base class for all normalizers
This class is not supposed to be instantiated directly. Instead, any implementation of a
Normalizer will return an instance of this class when instantiated.
"""
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class BertNormalizer(Normalizer):
"""
BertNormalizer
Takes care of normalizing raw text before giving it to a Bert model.
This includes cleaning the text, handling accents, chinese chars and lowercasing
Args:
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to clean the text, by removing any control characters
and replacing all whitespaces by the classic one.
handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to handle chinese chars by putting spaces around them.
strip_accents (:obj:`bool`, `optional`):
Whether to strip all accents. If this option is not specified (i.e. == None),
then it will be determined by the value for `lowercase` (as in the original Bert).
lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase.
"""
def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
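# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# With the defaults, control characters are cleaned, CJK characters get spaces
# around them, accents are stripped (because lowercase=True) and the text is
# lowercased.
from tokenizers.normalizers import BertNormalizer

BertNormalizer().normalize_str("Héllo\u00a0Wörld")  # expected -> "hello world"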
class Lowercase(Normalizer):
"""
Lowercase Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFC(Normalizer):
"""
NFC Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFD(Normalizer):
"""
NFD Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFKC(Normalizer):
"""
NFKC Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFKD(Normalizer):
"""
NFKD Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Nmt(Normalizer):
"""
Nmt normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Precompiled(Normalizer):
"""
Precompiled normalizer
Don't use this manually; it is used for compatibility with SentencePiece.
"""
def __init__(self, precompiled_charsmap):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Prepend(Normalizer):
"""
Prepend normalizer
"""
def __init__(self, prepend):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Replace(Normalizer):
"""
Replace normalizer
"""
def __init__(self, pattern, content):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
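# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# Replace rewrites every occurrence of `pattern` with `content`; the pattern
# may be a plain string or a tokenizers.Regex.
from tokenizers.normalizers import Replace

Replace("\n", " ").normalize_str("one\ntwo")  # -> "one two"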
class Sequence(Normalizer):
"""
Allows concatenating multiple other Normalizers as a Sequence.
All the normalizers run in sequence in the given order
Args:
normalizers (:obj:`List[Normalizer]`):
A list of Normalizer to be run as a sequence
"""
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
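# --- Illustrative sketch (editor addition, not part of the generated stub) ---
# The classic NFD + StripAccents + Lowercase pipeline, run in the given order.
from tokenizers.normalizers import NFD, Lowercase, Sequence, StripAccents

pipeline = Sequence([NFD(), StripAccents(), Lowercase()])
pipeline.normalize_str("Héllò hôw are ü?")  # -> "hello how are u?"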
class Strip(Normalizer):
"""
Strip normalizer
"""
def __init__(self, left=True, right=True):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class StripAccents(Normalizer):
"""
StripAccents normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
| 0 |