# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableDiffusionInpaintPipeline
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
image_params = frozenset(
[]
) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
image_latents_params = frozenset([])
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"})
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=9,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
# SD2-specific config below
attention_head_dim=(2, 4),
use_linear_projection=True,
)
scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
# SD2-specific config below
hidden_act="gelu",
projection_dim=512,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
}
return inputs
def test_stable_diffusion_inpaint(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInpaintPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_inpaint_pipeline(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy"
)
model_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def test_stable_diffusion_inpaint_pipeline_fp16(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy"
)
model_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id,
torch_dtype=torch.float16,
safety_checker=None,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
model_id = "stabilityai/stable-diffusion-2-inpainting"
pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id,
safety_checker=None,
scheduler=pndm,
torch_dtype=torch.float16,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)
_ = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
generator=generator,
num_inference_steps=2,
output_type="np",
)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py/0 | {"file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py", "repo_id": "diffusers", "token_count": 4788} | 140 |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
skip_mps,
torch_device,
)
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModelWithProjection(config)
@property
def dummy_image_encoder(self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
num_hidden_layers=5,
num_attention_heads=4,
image_size=32,
intermediate_size=37,
patch_size=1,
)
return CLIPVisionModelWithProjection(config)
@property
def dummy_text_proj(self):
torch.manual_seed(0)
model_kwargs = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs)
return model
@property
def dummy_decoder(self):
torch.manual_seed(0)
model_kwargs = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_super_res_kwargs(self):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first(self):
torch.manual_seed(0)
model = UNet2DModel(**self.dummy_super_res_kwargs)
return model
@property
def dummy_super_res_last(self):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1)
model = UNet2DModel(**self.dummy_super_res_kwargs)
return model
def get_dummy_components(self):
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type="learned_range",
prediction_type="epsilon",
num_train_timesteps=1000,
)
super_res_scheduler = UnCLIPScheduler(
variance_type="fixed_small_log",
prediction_type="epsilon",
num_train_timesteps=1000,
)
feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs(self, device, seed=0, pil_image=True):
input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0, 1)
input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def test_unclip_image_variation_input_tensor(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
output = pipe(**pipeline_inputs)
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
image_from_tuple = pipe(
**tuple_pipeline_inputs,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_image_variation_input_image(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
output = pipe(**pipeline_inputs)
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
image_from_tuple = pipe(
**tuple_pipeline_inputs,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_image_variation_input_list_images(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
pipeline_inputs["image"] = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
output = pipe(**pipeline_inputs)
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
tuple_pipeline_inputs["image"] = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
image_from_tuple = pipe(
**tuple_pipeline_inputs,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
expected_slice = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_passed_image_embed(self):
device = torch.device("cpu")
class DummyScheduler:
init_noise_sigma = 1
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=device).manual_seed(0)
dtype = pipe.decoder.dtype
batch_size = 1
shape = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
decoder_latents = pipe.prepare_latents(
shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
)
shape = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
super_res_latents = pipe.prepare_latents(
shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
img_out_1 = pipe(
**pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
).images
pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
# Don't pass image, instead pass embedding
image = pipeline_inputs.pop("image")
image_embeddings = pipe.image_encoder(image).image_embeds
img_out_2 = pipe(
**pipeline_inputs,
decoder_latents=decoder_latents,
super_res_latents=super_res_latents,
image_embeddings=image_embeddings,
).images
        # make sure passing image embeddings manually gives identical results
assert np.abs(img_out_1 - img_out_2).max() < 1e-4
# Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because UnCLIP GPU non-determinism requires a looser check.
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
expected_max_diff = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
)
# Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because UnCLIP non-determinism requires a looser check.
@unittest.skip("UnCLIP produces very large differences. Test is not useful.")
@skip_mps
def test_inference_batch_single_identical(self):
additional_params_copy_to_batched_inputs = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3
)
def test_inference_batch_consistent(self):
additional_params_copy_to_batched_inputs = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
batch_sizes = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=batch_sizes,
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
)
@skip_mps
def test_dict_tuple_outputs_equivalent(self):
return super().test_dict_tuple_outputs_equivalent()
@unittest.skip("UnCLIP produces very large difference. Test is not useful.")
@skip_mps
def test_save_load_local(self):
return super().test_save_load_local(expected_max_difference=4e-3)
@skip_mps
def test_save_load_optional_components(self):
return super().test_save_load_optional_components()
@unittest.skip("UnCLIP produces very large difference in fp16 vs fp32. Test is not useful.")
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=1.0)
@nightly
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
)
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipeline(
input_image,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image, expected_image, 15)
| diffusers/tests/pipelines/unclip/test_unclip_image_variation.py/0 | {"file_path": "diffusers/tests/pipelines/unclip/test_unclip_image_variation.py", "repo_id": "diffusers", "token_count": 8161} | 141 |
import tempfile
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVeScheduler
class ScoreSdeVeSchedulerTest(unittest.TestCase):
# TODO adapt with class SchedulerCommonTest (scheduler needs Numpy Integration)
scheduler_classes = (ScoreSdeVeScheduler,)
forward_default_kwargs = ()
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
sample = torch.rand((batch_size, num_channels, height, width))
return sample
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
sample = torch.arange(num_elems)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
sample = sample.permute(3, 0, 1, 2)
return sample
def dummy_model(self):
def model(sample, t, *args):
return sample * t / (t + 1)
return model
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 2000,
"snr": 0.15,
"sigma_min": 0.01,
"sigma_max": 1348,
"sampling_eps": 1e-5,
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
for scheduler_class in self.scheduler_classes:
sample = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
output = scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
new_output = new_scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
new_output = new_scheduler.step_correct(
residual, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
kwargs.update(forward_kwargs)
for scheduler_class in self.scheduler_classes:
sample = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
output = scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
new_output = new_scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
new_output = new_scheduler.step_correct(
residual, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical"
def test_timesteps(self):
for timesteps in [10, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_sigmas(self):
for sigma_min, sigma_max in zip([0.0001, 0.001, 0.01], [1, 100, 1000]):
self.check_over_configs(sigma_min=sigma_min, sigma_max=sigma_max)
def test_time_indices(self):
for t in [0.1, 0.5, 0.75]:
self.check_over_forward(time_step=t)
def test_full_loop_no_noise(self):
kwargs = dict(self.forward_default_kwargs)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 3
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_sigmas(num_inference_steps)
scheduler.set_timesteps(num_inference_steps)
generator = torch.manual_seed(0)
for i, t in enumerate(scheduler.timesteps):
sigma_t = scheduler.sigmas[i]
for _ in range(scheduler.config.correct_steps):
with torch.no_grad():
model_output = model(sample, sigma_t)
sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample
with torch.no_grad():
model_output = model(sample, sigma_t)
output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs)
sample, _ = output.prev_sample, output.prev_sample_mean
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert np.isclose(result_sum.item(), 14372758528.0)
assert np.isclose(result_mean.item(), 18714530.0)
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
| diffusers/tests/schedulers/test_scheduler_score_sde_ve.py/0 | {"file_path": "diffusers/tests/schedulers/test_scheduler_score_sde_ve.py", "repo_id": "diffusers", "token_count": 3215} | 142 |
# Stable Diffusion Deep Dive
<CourseFloatingBanner unit={3}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Stable Diffusion Deep Dive", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/en/unit3/stable_diffusion_deep_dive.ipynb"},
{label: "Stable Diffusion Deep Dive", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/en/unit3/stable_diffusion_deep_dive.ipynb"},
]} />
Stable Diffusion is a powerful text-to-image model. There are various websites and tools to make using it as easy as possible. It is also integrated into the Huggingface diffusers library where generating images can be as simple as:
```py
import torch
from diffusers import StableDiffusionPipeline
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True).to("cuda")
image = pipe("An astronaut scuba diving").images[0]
```
In this notebook we're going to dig into the code behind these easy-to-use interfaces, to see what is going on under the hood. We'll begin by re-creating the functionality above as a scary chunk of code, and then one by one we'll inspect the different components and figure out what they do. By the end of this notebook that same sampling loop should feel like something you can tweak and modify as you like.
## Setup & Imports
You'll need to log into huggingface and accept the terms of the licence for this model - see the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. And when you first run this notebook you need to uncomment the following two cells to install the requirements and log in to huggingface with an access token.
```py
# !pip install -q --upgrade transformers diffusers ftfy
```
```py
from base64 import b64encode
import numpy
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from huggingface_hub import notebook_login
# For video display:
from IPython.display import HTML
from matplotlib import pyplot as plt
from pathlib import Path
from PIL import Image
from torch import autocast
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging
torch.manual_seed(1)
if not (Path.home()/'.huggingface'/'token').exists(): notebook_login()
# Suppress some unnecessary warnings when loading the CLIPTextModel
logging.set_verbosity_error()
# Set device
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
```
## Loading the models
This code (and that in the next section) comes from the [Huggingface example notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb).
This will download and set up the relevant models and components we'll be using. Let's just run this for now and move on to the next section to check that it all works before diving deeper.
If you've loaded a pipeline, you can also access these components using `pipe.unet`, `pipe.vae` and so on.
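For example, a minimal sketch of that approach (assuming you have access to the model weights, as described above) rather than loading each piece separately:
```py
# A minimal sketch: pull the same components out of a full pipeline instead of
# loading them one by one (assumes you can download the weights, as described above)
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
vae, unet, scheduler = pipe.vae, pipe.unet, pipe.scheduler  # note: the pipeline's default scheduler may differ from the LMS one we set up below
tokenizer, text_encoder = pipe.tokenizer, pipe.text_encoder
```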
In this notebook we aren't doing any memory-saving tricks - if you find yourself running out of GPU RAM, look at the pipeline code for inspiration with things like attention slicing, switching to half precision (fp16), keeping the VAE on the CPU and other modifications.
```py
# Load the autoencoder model which will be used to decode the latents into image space.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
# Load the tokenizer and text encoder to tokenize and encode the text.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
# The UNet model for generating the latents.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
# To the GPU we go!
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device);
```
## A diffusion loop
If all you want is to make a picture with some text, you could ignore this notebook and use one of the existing tools (such as [DreamStudio](https://beta.dreamstudio.ai/generate)) or use the simplified pipeline from huggingface, as documented [here](https://huggingface.co/blog/stable_diffusion).
What we want to do in this notebook is dig a little deeper into how this works, so we'll start by checking that the example code runs. Again, this is adapted from the [HF notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb) and looks very similar to what you'll find if you inspect the [__call__() method](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L200) of the stable diffusion pipeline.
```py
# Some settings
prompt = ["A watercolor painting of an otter"]
height = 512 # default height of Stable Diffusion
width = 512 # default width of Stable Diffusion
num_inference_steps = 30 # Number of denoising steps
guidance_scale = 7.5 # Scale for classifier-free guidance
generator = torch.manual_seed(32) # Seed generator to create the initial latent noise
batch_size = 1
# Prep text
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prep Scheduler
scheduler.set_timesteps(num_inference_steps)
# Prep latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma # Scaling (previous versions did latents = latents * self.scheduler.sigmas[0])
# Loop
with autocast("cuda"):
for i, t in tqdm(enumerate(scheduler.timesteps)):
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
# Scale the latents (preconditioning):
# latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5) # Diffusers 0.3 and below
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
# latents = scheduler.step(noise_pred, i, latents)["prev_sample"] # Diffusers 0.3 and below
latents = scheduler.step(noise_pred, t, latents).prev_sample
# scale and decode the image latents with vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
image = vae.decode(latents).sample
# Display
image = (image / 2 + 0.5).clamp(0, 1)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
images = (image * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
pil_images[0]
```
It's working, but that's quite a bit of code! Let's look at the components one by one.
## The Autoencoder (AE)
The AE can 'encode' an image into some sort of latent representation, and decode this back into an image. I've wrapped the code for this into a couple of functions here so we can see what this looks like in action:
```py
def pil_to_latent(input_im):
# Single image -> single latent in a batch (so size 1, 4, 64, 64)
with torch.no_grad():
latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device)*2-1) # Note scaling
return 0.18215 * latent.latent_dist.sample()
def latents_to_pil(latents):
    # batch of latents -> list of images
latents = (1 / 0.18215) * latents
with torch.no_grad():
image = vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
images = (image * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
return pil_images
```
*(Image placeholder: illustration for the autoencoder section.)*
We'll use a pic from the web here, but you can load your own instead by uploading it and editing the filename in the next cell.
```py
# Download a demo Image
!curl --output macaw.jpg 'https://lafeber.com/pet-birds/wp-content/uploads/2018/06/Scarlet-Macaw-2.jpg'
```
```py
# Load the image with PIL
input_image = Image.open('macaw.jpg').resize((512, 512))
input_image
```
*(Output: the downloaded macaw photo, resized to 512x512.)*
Encoding this into the latent space of the AE with the function defined above looks like this:
```py
# Encode to the latent space
encoded = pil_to_latent(input_image)
encoded.shape
```
```py
torch.Size([1, 4, 64, 64])
```
```py
# Let's visualize the four channels of this latent representation:
fig, axs = plt.subplots(1, 4, figsize=(16, 4))
for c in range(4):
axs[c].imshow(encoded[0][c].cpu(), cmap='Greys')
```
*(Output: the four channels of the latent representation, visualized as grayscale images.)*
This 4x64x64 tensor captures lots of information about the image, hopefully enough that when we feed it through the decoder we get back something very close to our input image:
```py
# Decode this latent representation back into an image
decoded = latents_to_pil(encoded)[0]
decoded
```
*(Output: the decoded image, visually almost identical to the input.)*
You'll see some small differences if you squint! Focus on the eye if you can't see anything obvious. This is pretty impressive - that 4x64x64 latent seems to hold a lot more information than a 64px image...
This autoencoder has been trained to squish down an image to a smaller representation and then re-create the image back from this compressed version again.
In this particular case the compression factor is 48: we start with a 3x512x512 (channels x height x width) image and it gets compressed to a 4x64x64 latent. Each 3x8x8 pixel volume in the input image gets compressed down to just 4 numbers (4x1x1). You can find AEs with a higher compression ratio (e.g. f16, like some popular VQGAN models), but at some point they begin to introduce artifacts that we don't want.
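To make that concrete, here is the arithmetic behind the compression factor (just a sanity check on the shapes above, not part of the original pipeline):
```py
# Sanity-check the compression factor quoted above
image_numel = 3 * 512 * 512    # input image: channels x height x width
latent_numel = 4 * 64 * 64     # latent: channels x height x width
print(image_numel / latent_numel)  # 48.0
```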
Why do we even use an autoencoder? We could do diffusion in pixel space - where the model gets all the image data as inputs and produces an output prediction of the same shape. But this means processing a LOT of data, and makes high-resolution generation very computationally expensive. Some solutions to this involve doing diffusion at low resolution (e.g. 64px) and then training a separate model to upscale repeatedly (as with D2/Imagen). But latent diffusion instead does the diffusion process in this 'latent space', using the compressed representations from our AE rather than raw images. These representations are information rich, and can be small enough to handle manageably on consumer hardware. Once we've generated a new 'image' as a latent representation, the autoencoder can take those final latent outputs and turn them into actual pixels.
## The Scheduler
Now we need to talk about adding noise...
During training, we add some noise to an image and then have the model try to predict the noise. If we always added a ton of noise, the model might not have much to work with. If we only added a tiny amount, the model wouldn't be able to do much with the random starting points we use for sampling. So during training the amount is varied, according to some distribution.
During sampling, we want to 'denoise' over a number of steps. How many steps and how much noise we should aim for at each step are going to affect the final result.
The scheduler is in charge of handling all of these details. For example: `scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)` sets up a scheduler that matches the one used to train this model. When we want to sample over a smaller number of steps, we set this up with scheduler.set_timesteps:
```py
# Setting the number of sampling steps:
scheduler.set_timesteps(15)
```
You can see how our new set of steps corresponds to those used in training:
```py
# See these in terms of the original 1000 steps used for training:
print(scheduler.timesteps)
```
```py
tensor([999.0000, 927.6429, 856.2857, 784.9286, 713.5714, 642.2143, 570.8571,
499.5000, 428.1429, 356.7857, 285.4286, 214.0714, 142.7143, 71.3571,
0.0000], dtype=torch.float64)
```
And how much noise is present at each:
```py
# Look at the equivalent noise levels:
print(scheduler.sigmas)
```
```py
tensor([14.6146, 9.6826, 6.6780, 4.7746, 3.5221, 2.6666, 2.0606, 1.6156,
1.2768, 1.0097, 0.7913, 0.6056, 0.4397, 0.2780, 0.0292, 0.0000])
```
During sampling, we'll start at a high noise level (in fact, our input will be pure noise) and gradually 'denoise' down to an image, according to this schedule.
```py
# Plotting this noise schedule:
plt.plot(scheduler.sigmas)
plt.title('Noise Schedule')
plt.xlabel('Sampling step')
plt.ylabel('sigma')
plt.show()
```
This 'sigma' is the amount of noise added to the latent representation. Let's visualize what this looks like by adding a bit of noise to our encoded image and then decoding this noised version:
```py
noise = torch.randn_like(encoded) # Random noise
sampling_step = 10 # Equivalent to step 10 out of 15 in the schedule above
# encoded_and_noised = scheduler.add_noise(encoded, noise, timestep) # Diffusers 0.3 and below
encoded_and_noised = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[sampling_step]]))
latents_to_pil(encoded_and_noised.float())[0] # Display
```
*(Output: the decoded image after adding noise equivalent to sampling step 10 of 15.)*
What does this look like at different timesteps? Experiment and see for yourself!
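For example, here is one rough sketch of how you might do that, re-using `encoded`, the 15-step schedule and the helper functions from above:
```py
from IPython.display import display

# Decode the noised latents at a few different points in the 15-step schedule
# (lower step indices correspond to higher sigmas, i.e. more noise)
for step in [2, 6, 10, 14]:
    step_noise = torch.randn_like(encoded)
    noised = scheduler.add_noise(encoded, step_noise, timesteps=torch.tensor([scheduler.timesteps[step]]))
    display(latents_to_pil(noised.float())[0])
```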
If you uncomment the cell below you'll see that in this case the `scheduler.add_noise` function literally just adds noise scaled by sigma: `noisy_samples = original_samples + noise * sigmas`
```py
# ??scheduler.add_noise
```
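We can also check that claim numerically with the tensors from the noising cell above (a quick sanity check, not part of the original notebook):
```py
# `scheduler.add_noise` should match `original + noise * sigma` for this scheduler
# (re-using `encoded`, `noise`, `sampling_step` and `encoded_and_noised` from above)
manual = encoded + noise * scheduler.sigmas[sampling_step]
print(torch.allclose(manual.float(), encoded_and_noised.float(), atol=1e-4))  # expect True
```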
Other diffusion models may be trained with different noising and scheduling approaches, some of which keep the variance fairly constant across noise levels ('variance preserving') with different scaling and mixing tricks instead of having noisy latents with higher and higher variance as more noise is added ('variance exploding').
If we want to start from random noise instead of a noised image, we need to scale it by the largest sigma value used during training, ~14 in this case. And before these noisy latents are fed to the model they are scaled again in the so-called pre-conditioning step: `latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)` (now handled by `latent_model_input = scheduler.scale_model_input(latent_model_input, t)`).
Again, this scaling/pre-conditioning differs between papers and implementations, so keep an eye out for this if you work with a different type of diffusion model.
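For the K-LMS scheduler we're using here, we can at least check that the manual formula and the `scale_model_input` helper agree (a quick sketch, re-using the 15-step schedule set above):
```py
# Compare manual pre-conditioning with scheduler.scale_model_input for one step
i = 0                               # first sampling step (largest sigma)
t = scheduler.timesteps[i]
sigma = scheduler.sigmas[i]
x = torch.randn(1, 4, 64, 64)       # stand-in for some latents
manual_scaled = x / ((sigma**2 + 1) ** 0.5)
print(torch.allclose(manual_scaled, scheduler.scale_model_input(x, t), atol=1e-4))  # expect True
```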
## Loop starting from noised version of input (AKA image2image)
Let's see what happens when we use our image as a starting point, adding some noise and then doing the final few denoising steps in the loop with a new prompt.
We'll use a similar loop to the first demo, but we'll skip the first `start_step` steps.
To noise our image we'll use code like that shown above, using the scheduler to noise it to a level equivalent to step 10 (`start_step`).
```py
# Settings (same as before except for the new prompt)
prompt = ["A colorful dancer, nat geo photo"]
height = 512 # default height of Stable Diffusion
width = 512 # default width of Stable Diffusion
num_inference_steps = 50 # Number of denoising steps
guidance_scale = 8 # Scale for classifier-free guidance
generator = torch.manual_seed(32) # Seed generator to create the initial latent noise
batch_size = 1
# Prep text (same as before)
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prep Scheduler (setting the number of inference steps)
scheduler.set_timesteps(num_inference_steps)
# Prep latents (noising appropriately for start_step)
start_step = 10
start_sigma = scheduler.sigmas[start_step]
noise = torch.randn_like(encoded)
latents = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[start_step]]))
latents = latents.to(torch_device).float()
# Loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
if i >= start_step: # << This is the only modification to the loop we do
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
# perform guidance
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = scheduler.step(noise_pred, t, latents).prev_sample
latents_to_pil(latents)[0]
```
*(Output: the image generated from the noised input.)*
You can see that some colours and structure from the image are kept, but we now have a new picture! The more noise you add and the more steps you do, the further away it gets from the input image.
This is how the popular img2img pipeline works. Again, if this is your end goal there are tools to make this easy!
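For reference, a rough sketch of that ready-made route (argument names have shifted a little between diffusers versions, so treat this as illustrative rather than definitive):
```py
# Rough sketch of the ready-made img2img pipeline (illustrative - check your diffusers
# version's docs for the exact argument names)
from diffusers import StableDiffusionImg2ImgPipeline

img2img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device)
out = img2img_pipe(
    prompt="A colorful dancer, nat geo photo",
    image=input_image,   # the macaw picture from earlier
    strength=0.8,        # roughly: how much of the schedule to re-run (like our start_step)
    guidance_scale=8,
).images[0]
out
```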
But you can see that under the hood this is the same as the generation loop just skipping the first few steps and starting from a noised image rather than pure noise.
Explore changing how many steps are skipped and see how this affects the amount the image changes from the input.
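Here is one rough sketch of how you might explore that, wrapping the loop above in a function parameterised by `start_step` (re-using `encoded`, `text_embeddings`, `guidance_scale` and the helpers defined earlier):
```py
from IPython.display import display

# Try several start_steps: a higher start_step means less noise is added,
# so the result stays closer to the input image
def img2img_from_step(start_step, num_inference_steps=50, seed=32):
    torch.manual_seed(seed)
    scheduler.set_timesteps(num_inference_steps)
    noise = torch.randn_like(encoded)
    latents = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[start_step]]))
    latents = latents.to(torch_device).float()
    for i, t in tqdm(enumerate(scheduler.timesteps)):
        if i >= start_step:
            latent_model_input = scheduler.scale_model_input(torch.cat([latents] * 2), t)
            with torch.no_grad():
                noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            latents = scheduler.step(noise_pred, t, latents).prev_sample
    return latents_to_pil(latents)[0]

for start_step in (10, 20, 30, 40):
    display(img2img_from_step(start_step))
```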
## Exploring the text -> embedding pipeline
We use a text encoder model to turn our text into a set of 'embeddings' which are fed to the diffusion model as conditioning. Let's follow a piece of text through this process and see how it works.
```py
# Our text prompt
prompt = 'A picture of a puppy'
```
We begin with tokenization:
```py
# Turn the text into a sequence of tokens:
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
text_input['input_ids'][0] # View the tokens
```
```py
tensor([49406, 320, 1674, 539, 320, 6829, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407, 49407,
49407, 49407, 49407, 49407, 49407, 49407, 49407])
```
```py
# See the individual tokens
for t in text_input['input_ids'][0][:8]: # We'll just look at the first few to save you from a wall of '<|endoftext|>'
print(t, tokenizer.decoder.get(int(t)))
```
```py
tensor(49406) <|startoftext|>
tensor(320) a</w>
tensor(1674) picture</w>
tensor(539) of</w>
tensor(320) a</w>
tensor(6829) puppy</w>
tensor(49407) <|endoftext|>
tensor(49407) <|endoftext|>
```
We can jump straight to the final (output) embeddings like so:
```py
# Grab the output embeddings
output_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
print('Shape:', output_embeddings.shape)
output_embeddings
```
```py
Shape: torch.Size([1, 77, 768])
tensor([[[-0.3884, 0.0229, -0.0522, ..., -0.4899, -0.3066, 0.0675],
[ 0.0290, -1.3258, 0.3085, ..., -0.5257, 0.9768, 0.6652],
[ 0.6942, 0.3538, 1.0991, ..., -1.5716, -1.2643, -0.0121],
...,
[-0.0221, -0.0053, -0.0089, ..., -0.7303, -1.3830, -0.3011],
[-0.0062, -0.0246, 0.0065, ..., -0.7326, -1.3745, -0.2953],
[-0.0536, 0.0269, 0.0444, ..., -0.7159, -1.3634, -0.3075]]],
device='cuda:0', grad_fn=<NativeLayerNormBackward0>)
```
We pass our tokens through the text_encoder and we magically get some numbers we can feed to the model.
How are these generated? The tokens are transformed into a set of input embeddings, which are then fed through the transformer model to get the final output embeddings.
To get these input embeddings, there are actually two steps - as revealed by inspecting `text_encoder.text_model.embeddings`:
```py
text_encoder.text_model.embeddings
```
```py
CLIPTextEmbeddings(
(token_embedding): Embedding(49408, 768)
(position_embedding): Embedding(77, 768)
)
```
## Token embeddings
The token is fed to the `token_embedding` to transform it into a vector. The function name `get_input_embeddings` here is misleading, since these token embeddings need to be combined with the position embeddings before they are actually used as inputs to the model! Anyway, let's look at just the token embedding part first.
We can look at the embedding layer:
```py
# Access the embedding layer
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
token_emb_layer # Vocab size 49408, emb_dim 768
```
```py
Embedding(49408, 768)
```
And embed a token like so:
```py
# Embed a token - in this case the one for 'puppy'
embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
embedding.shape # 768-dim representation
```
```py
torch.Size([768])
```
This single token has been mapped to a 768-dimensional vector - the token embedding.
We can do the same with all of the tokens in the prompt to get all the token embeddings:
```py
token_embeddings = token_emb_layer(text_input.input_ids.to(torch_device))
print(token_embeddings.shape) # batch size 1, 77 tokens, 768 values for each
token_embeddings
```
```py
torch.Size([1, 77, 768])
tensor([[[ 0.0011, 0.0032, 0.0003, ..., -0.0018, 0.0003, 0.0019],
[ 0.0013, -0.0011, -0.0126, ..., -0.0124, 0.0120, 0.0080],
[ 0.0235, -0.0118, 0.0110, ..., 0.0049, 0.0078, 0.0160],
...,
[ 0.0012, 0.0077, -0.0011, ..., -0.0015, 0.0009, 0.0052],
[ 0.0012, 0.0077, -0.0011, ..., -0.0015, 0.0009, 0.0052],
[ 0.0012, 0.0077, -0.0011, ..., -0.0015, 0.0009, 0.0052]]],
device='cuda:0', grad_fn=<EmbeddingBackward0>)
```
## Positional Embeddings
Positional embeddings tell the model where in a sequence a token is. Much like the token embedding, this is a set of (optionally learnable) parameters. But now instead of dealing with ~50k tokens we just need one for each position (77 total):
```py
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
pos_emb_layer
```
```py
Embedding(77, 768)
```
We can get the positional embedding for each position:
```py
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)
print(position_embeddings.shape)
position_embeddings
```
```py
torch.Size([1, 77, 768])
tensor([[[ 0.0016, 0.0020, 0.0002, ..., -0.0013, 0.0008, 0.0015],
[ 0.0042, 0.0029, 0.0002, ..., 0.0010, 0.0015, -0.0012],
[ 0.0018, 0.0007, -0.0012, ..., -0.0029, -0.0009, 0.0026],
...,
[ 0.0216, 0.0055, -0.0101, ..., -0.0065, -0.0029, 0.0037],
[ 0.0188, 0.0073, -0.0077, ..., -0.0025, -0.0009, 0.0057],
[ 0.0330, 0.0281, 0.0289, ..., 0.0160, 0.0102, -0.0310]]],
device='cuda:0', grad_fn=<EmbeddingBackward0>)
```
## Combining token and position embeddings
Time to combine the two. How do we do this? Just add them! Other approaches are possible but for this model this is how it is done.
Combining them in this way gives us the final input embeddings ready to feed through the transformer model:
```py
# And combining them we get the final input embeddings
input_embeddings = token_embeddings + position_embeddings
print(input_embeddings.shape)
input_embeddings
```
```py
torch.Size([1, 77, 768])
tensor([[[ 2.6770e-03, 5.2133e-03, 4.9323e-04, ..., -3.1321e-03,
1.0659e-03, 3.4316e-03],
[ 5.5371e-03, 1.7510e-03, -1.2381e-02, ..., -1.1410e-02,
1.3508e-02, 6.8378e-03],
[ 2.5356e-02, -1.1019e-02, 9.7663e-03, ..., 1.9460e-03,
6.8375e-03, 1.8573e-02],
...,
[ 2.2781e-02, 1.3262e-02, -1.1241e-02, ..., -8.0054e-03,
-2.0560e-03, 8.9366e-03],
[ 2.0026e-02, 1.5015e-02, -8.7638e-03, ..., -4.0313e-03,
1.8487e-05, 1.0885e-02],
[ 3.4206e-02, 3.5826e-02, 2.7768e-02, ..., 1.4465e-02,
1.1110e-02, -2.5745e-02]]], device='cuda:0', grad_fn=<AddBackward0>)
```
We can check that these are the same as the result we'd get from `text_encoder.text_model.embeddings`:
```py
# The following combines all the above steps (but doesn't let us fiddle with them!)
text_encoder.text_model.embeddings(text_input.input_ids.to(torch_device))
```
```py
tensor([[[ 2.6770e-03, 5.2133e-03, 4.9323e-04, ..., -3.1321e-03,
1.0659e-03, 3.4316e-03],
[ 5.5371e-03, 1.7510e-03, -1.2381e-02, ..., -1.1410e-02,
1.3508e-02, 6.8378e-03],
[ 2.5356e-02, -1.1019e-02, 9.7663e-03, ..., 1.9460e-03,
6.8375e-03, 1.8573e-02],
...,
[ 2.2781e-02, 1.3262e-02, -1.1241e-02, ..., -8.0054e-03,
-2.0560e-03, 8.9366e-03],
[ 2.0026e-02, 1.5015e-02, -8.7638e-03, ..., -4.0313e-03,
1.8487e-05, 1.0885e-02],
[ 3.4206e-02, 3.5826e-02, 2.7768e-02, ..., 1.4465e-02,
1.1110e-02, -2.5745e-02]]], device='cuda:0', grad_fn=<AddBackward0>)
```
## Feeding these through the transformer model
*(Figure placeholder: diagram of the text encoder's transformer.)*
We want to mess with these input embeddings (specifically the token embeddings) before we send them through the rest of the model, but first we should check that we know how to do that. I read the code of the text_encoders `forward` method, and based on that the code for the `forward` method of the text_model that the text_encoder wraps. To inspect it yourself, type `??text_encoder.text_model.forward` and you'll get the function info and source code - a useful debugging trick!
Anyway, based on that we can copy in the bits we need to get the so-called 'last hidden state' and thus generate our final embeddings:
```py
def get_output_embeds(input_embeddings):
# CLIP's text model uses causal mask, so we prepare it here:
bsz, seq_len = input_embeddings.shape[:2]
causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)
# Getting the output embeddings involves calling the model with passing output_hidden_states=True
# so that it doesn't just return the pooled final predictions:
encoder_outputs = text_encoder.text_model.encoder(
inputs_embeds=input_embeddings,
attention_mask=None, # We aren't using an attention mask so that can be None
causal_attention_mask=causal_attention_mask.to(torch_device),
output_attentions=None,
output_hidden_states=True, # We want the output embs not the final output
return_dict=None,
)
# We're interested in the output hidden state only
output = encoder_outputs[0]
# There is a final layer norm we need to pass these through
output = text_encoder.text_model.final_layer_norm(output)
# And now they're ready!
return output
out_embs_test = get_output_embeds(input_embeddings) # Feed through the model with our new function
print(out_embs_test.shape) # Check the output shape
out_embs_test # Inspect the output
```
```py
torch.Size([1, 77, 768])
tensor([[[-0.3884, 0.0229, -0.0522, ..., -0.4899, -0.3066, 0.0675],
[ 0.0290, -1.3258, 0.3085, ..., -0.5257, 0.9768, 0.6652],
[ 0.6942, 0.3538, 1.0991, ..., -1.5716, -1.2643, -0.0121],
...,
[-0.0221, -0.0053, -0.0089, ..., -0.7303, -1.3830, -0.3011],
[-0.0062, -0.0246, 0.0065, ..., -0.7326, -1.3745, -0.2953],
[-0.0536, 0.0269, 0.0444, ..., -0.7159, -1.3634, -0.3075]]],
device='cuda:0', grad_fn=<NativeLayerNormBackward0>)
```
Note that these match the `output_embeddings` we saw near the start - we've figured out how to split up that one step ("get the text embeddings") into multiple sub-steps ready for us to modify.
Now that we have this process in place, we can replace the input embedding of a token with a new one of our choice - which in our final use-case will be something we learn. To demonstrate the concept though, let's replace the input embedding for 'puppy' in the prompt we've been playing with with the embedding for token 2368, get a new set of output embeddings based on this, and use these to generate an image to see what we get:
```py
prompt = 'A picture of a puppy'
# Tokenize
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)
# Get token embeddings
token_embeddings = token_emb_layer(input_ids)
# The new embedding. In this case just the input embedding of token 2368...
replacement_token_embedding = text_encoder.get_input_embeddings()(torch.tensor(2368, device=torch_device))
# Insert this into the token embeddings (
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
# Combine with pos embs
input_embeddings = token_embeddings + position_embeddings
# Feed through to get final output embs
modified_output_embeddings = get_output_embeds(input_embeddings)
print(modified_output_embeddings.shape)
modified_output_embeddings
```
```py
torch.Size([1, 77, 768])
tensor([[[-0.3884, 0.0229, -0.0522, ..., -0.4899, -0.3066, 0.0675],
[ 0.0290, -1.3258, 0.3085, ..., -0.5257, 0.9768, 0.6652],
[ 0.6942, 0.3538, 1.0991, ..., -1.5716, -1.2643, -0.0121],
...,
[-0.6034, -0.5322, 0.0629, ..., -0.3964, 0.0877, -0.9558],
[-0.5936, -0.5407, 0.0731, ..., -0.3876, 0.0906, -0.9436],
[-0.6393, -0.4703, 0.1103, ..., -0.3904, 0.1351, -0.9726]]],
device='cuda:0', grad_fn=<NativeLayerNormBackward0>)
```
The first few are the same, the last aren't. Everything at and after the position of the token we're replacing will be affected.
If all went well, we should see something other than a puppy when we use these to generate an image. And sure enough, we do!
```py
#Generating an image with these modified embeddings
def generate_with_embs(text_embeddings):
height = 512 # default height of Stable Diffusion
width = 512 # default width of Stable Diffusion
num_inference_steps = 30 # Number of denoising steps
guidance_scale = 7.5 # Scale for classifier-free guidance
generator = torch.manual_seed(32) # Seed generator to create the inital latent noise
batch_size = 1
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prep Scheduler
scheduler.set_timesteps(num_inference_steps)
# Prep latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
# perform guidance
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = scheduler.step(noise_pred, t, latents).prev_sample
return latents_to_pil(latents)[0]
```
```py
generate_with_embs(modified_output_embeddings)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
Suprise! Now you know what token 2368 means ;)
What can we do with this? Why did we go to all of this trouble? Well, we'll see a more compelling use-case shortly but the tl;dr is that once we can access and modify the token embeddings we can do tricks like replacing them with something else. In the example we just did, that was just another token embedding from the model's vocabulary, equivalent to just editing the prompt. But we can also mix tokens - for example, here's a half-puppy-half-skunk:
```py
# In case you're wondering how to get the token for a word, or the embedding for a token:
prompt = 'skunk'
print('tokenizer(prompt):', tokenizer(prompt))
print('token_emb_layer([token_id]) shape:', token_emb_layer(torch.tensor([8797], device=torch_device)).shape)
```
```py
tokenizer(prompt): {'input_ids': [49406, 42194, 49407], 'attention_mask': [1, 1, 1]}
token_emb_layer([token_id]) shape: torch.Size([1, 768])
```
```py
prompt = 'A picture of a puppy'
# Tokenize
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)
# Get token embeddings
token_embeddings = token_emb_layer(input_ids)
# The new embedding. Which is now a mixture of the token embeddings for 'puppy' and 'skunk'
puppy_token_embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
skunk_token_embedding = token_emb_layer(torch.tensor(42194, device=torch_device))
replacement_token_embedding = 0.5*puppy_token_embedding + 0.5*skunk_token_embedding
# Insert this into the token embeddings (
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
# Combine with pos embs
input_embeddings = token_embeddings + position_embeddings
# Feed through to get final output embs
modified_output_embeddings = get_output_embeds(input_embeddings)
# Generate an image with these
generate_with_embs(modified_output_embeddings)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
## Textual Inversion
OK, so we can slip in a modified token embedding, and use this to generate an image. We used the token embedding for 'cat' in the above example, but what if instead could 'learn' a new token embedding for a specific concept? This is the idea behind 'Textual Inversion', in which a few example images are used to create a new token embedding:
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
Diagram from the textual inversion [blog post](https://textual-inversion.github.io/) - note it doesn't show the positional embeddings step for simplicity
We won't cover how this training works, but we can try loading one of these new 'concepts' from the [community-created SD concepts library](https://huggingface.co/sd-concepts-library) and see how it fits in with our example above. I'll use [https://huggingface.co/sd-concepts-library/birb-style](https://huggingface.co/sd-concepts-library/birb-style) since it was the first one I made :) Download the learned_embeds.bin file from there and upload the file to wherever this notebook is before running this next cell:
```py
birb_embed = torch.load('learned_embeds.bin')
birb_embed.keys(), birb_embed['<birb-style>'].shape
```
```py
(dict_keys(['<birb-style>']), torch.Size([768]))
```
We get a dictionary with a key (the special placeholder I used, ) and the corresponding token embedding. As in the previous example, let's replace the 'puppy' token embedding with this and see what happens:
```py
prompt = 'A mouse in the style of puppy'
# Tokenize
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)
# Get token embeddings
token_embeddings = token_emb_layer(input_ids)
# The new embedding - our special birb word
replacement_token_embedding = birb_embed['<birb-style>'].to(torch_device)
# Insert this into the token embeddings
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
# Combine with pos embs
input_embeddings = token_embeddings + position_embeddings
# Feed through to get final output embs
modified_output_embeddings = get_output_embeds(input_embeddings)
# And generate an image with this:
generate_with_embs(modified_output_embeddings)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
The token for 'puppy' was replaced with one that captures a particular style of painting, but it could just as easily represent a specific object or class of objects.
Again, there is a [nice inference notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) from hf to make it easy to use the different concepts, that properly handles using the names in prompts ("A <cat-toy> in the style of <birb-style>") without worrying about all this manual stuff. The goal of this notebook is to pull back the curtain a bit so you know what is going on behind the scenes :)
## Messing with Embeddings
Besides just replacing the token embedding of a single word, there are various other tricks we can try. For example, what if we create a 'chimera' by averaging the embeddings of two different prompts?
```py
# Embed two prompts
text_input1 = tokenizer(["A mouse"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
text_input2 = tokenizer(["A leopard"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings1 = text_encoder(text_input1.input_ids.to(torch_device))[0]
text_embeddings2 = text_encoder(text_input2.input_ids.to(torch_device))[0]
# Mix them together
mix_factor = 0.35
mixed_embeddings = (text_embeddings1*mix_factor + \
text_embeddings2*(1-mix_factor))
# Generate!
generate_with_embs(mixed_embeddings)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
## The UNET and CFG
Now it's time we looked at the actual diffusion model. This is typically a Unet that takes in the noisy latents (x) and predicts the noise. We use a conditional model that also takes in the timestep (t) and our text embedding (aka encoder_hidden_states) as conditioning. Feeding all of these into the model looks like this: `noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]`
We can try it out and see what the output looks like:
```py
# Prep Scheduler
scheduler.set_timesteps(num_inference_steps)
# What is our timestep
t = scheduler.timesteps[0]
sigma = scheduler.sigmas[0]
# A noisy latent
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Text embedding
text_input = tokenizer(['A macaw'], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
# Run this through the unet to predict the noise residual
with torch.no_grad():
noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]
latents.shape, noise_pred.shape # We get preds in the same shape as the input
```
```py
(torch.Size([1, 4, 64, 64]), torch.Size([1, 4, 64, 64]))
```
Given a set of noisy latents, the model predicts the noise component. We can remove this noise from the noisy latents to see what the output image looks like (`latents_x0 = latents - sigma * noise_pred`). And we can add most of the noise back to this predicted output to get the (slightly less noisy hopefully) input for the next diffusion step. To visualize this let's generate another image, saving both the predicted output (x0) and the next step (xt-1) after every step:
```py
prompt = 'Oil painting of an otter in a top hat'
height = 512
width = 512
num_inference_steps = 50
guidance_scale = 8
generator = torch.manual_seed(32)
batch_size = 1
# Make a folder to store results
!rm -rf steps/
!mkdir -p steps/
# Prep text
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prep Scheduler
scheduler.set_timesteps(num_inference_steps)
# Prep latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
# perform guidance
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# Get the predicted x0:
# latents_x0 = latents - sigma * noise_pred # Calculating ourselves
latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample # Using the scheduler (Diffusers 0.4 and above)
# compute the previous noisy sample x_t -> x_t-1
latents = scheduler.step(noise_pred, t, latents).prev_sample
# To PIL Images
im_t0 = latents_to_pil(latents_x0)[0]
im_next = latents_to_pil(latents)[0]
# Combine the two images and save for later viewing
im = Image.new('RGB', (1024, 512))
im.paste(im_next, (0, 0))
im.paste(im_t0, (512, 0))
im.save(f'steps/{i:04}.jpeg')
```
```py
# Make and show the progress video (change width to 1024 for full res)
!ffmpeg -v 1 -y -f image2 -framerate 12 -i steps/%04d.jpeg -c:v libx264 -preset slow -qp 18 -pix_fmt yuv420p out.mp4
mp4 = open('out.mp4','rb').read()
data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
HTML("""
<video width=600 controls>
<source src="%s" type="video/mp4">
</video>
""" % data_url)
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
The version on the right shows the predicted 'final output' (x0) at each step, and this is what is usually used for progress videos etc. The version on the left is the 'next step'. I found it interesteing to compare the two - watching the progress videos only you'd think drastic changes are happening expecially at early stages, but since the changes made per-step are relatively small the actual process is much more gradual.
## Classifier Free Guidance
By default, the model doesn't often do what we ask. If we want it to follow the prompt better, we use a hack called CFG. There's a good explanation in this video (AI coffee break GLIDE).
In the code, this comes down to us doing:
`noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`
This works suprisingly well :) Explore changing the guidance_scale in the code above and see how this affects the results. How high can you push it before the results get worse?
## Sampling
There is still some complexity hidden from us inside `latents = scheduler.step(noise_pred, i, latents)["prev_sample"]`. How exactly does the sampler go from the current noisy latents to a slightly less noisy version? Why don't we just use the model in a single step? Are there other ways to view this?
The model tries to predict the noise in an image. For low noise values, we assume it does a pretty good job. For higher noise levels, it has a hard task! So instead of producing a perfect image, the results tend to look like a blurry mess - see the start of the video above for a visual! So, samplers use the model predictions to move a small amount towards the model prediction (removing some of the noise) and then get another prediction based on this marginally-less-rubbish input, and hope that this iteratively improves the result.
Different samplers do this in different ways. You can try to inspect the code for the default LMS sampler with:
```py
# ??scheduler.step
```
## Guidance
OK, final trick! How can we add some extra control to this generation process?
At each step, we're going to use our model as before to predict the noise component of x. Then we'll use this to produce a predicted output image, and apply some loss function to this image.
This function can be anything, but let's demo with a super simple example. If we want images that have a lot of blue, we can craft a loss function that gives a high loss if pixels have a low blue component:
```py
def blue_loss(images):
# How far are the blue channel values to 0.9:
error = torch.abs(images[:,2] - 0.9).mean() # [:,2] -> all images in batch, only the blue channel
return error
```
During each update step, we find the gradient of the loss with respect to the current noisy latents, and tweak them in the direction that reduces this loss as well as performing the normal update step:
```py
prompt = 'A campfire (oil on canvas)' #@param
height = 512 # default height of Stable Diffusion
width = 512 # default width of Stable Diffusion
num_inference_steps = 50 #@param # Number of denoising steps
guidance_scale = 8 #@param # Scale for classifier-free guidance
generator = torch.manual_seed(32) # Seed generator to create the inital latent noise
batch_size = 1
blue_loss_scale = 200 #@param
# Prep text
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
# And the uncond. input as before:
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prep Scheduler
scheduler.set_timesteps(num_inference_steps)
# Prep latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
# perform CFG
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#### ADDITIONAL GUIDANCE ###
if i%5 == 0:
# Requires grad on the latents
latents = latents.detach().requires_grad_()
# Get the predicted x0:
# latents_x0 = latents - sigma * noise_pred
latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
# Decode to image space
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
# Calculate loss
loss = blue_loss(denoised_images) * blue_loss_scale
# Occasionally print it out
if i%10==0:
print(i, 'loss:', loss.item())
# Get gradient
cond_grad = torch.autograd.grad(loss, latents)[0]
# Modify the latents based on this gradient
latents = latents.detach() - cond_grad * sigma**2
# Now step with scheduler
latents = scheduler.step(noise_pred, t, latents).prev_sample
latents_to_pil(latents)[0]
```
```py
0 loss: 182.02133178710938
10 loss: 43.55351257324219
20 loss: 15.30621337890625
30 loss: 9.746519088745117
40 loss: 8.846868515014648
```
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours.">
</div>
Tweak the scale (`blue_loss_scale`) - at low values, the image is mostly red and orange thanks to the prompt. At higher values, it is mostly bluish! Too high and we get a plain blue image.
Since this is slow, you'll notice that I only apply this loss once every 5 iterations - this was a suggestion from Jeremy and we left it in because for this demo it saves time and still works. For your own tests you may want to explore using a lower scale for the loss and applying it every iteration instead :)
NB: We should set latents requires_grad=True before we do the forward pass of the unet (removing with `torch.no_grad()`) if we want mode accurate gradients. BUT this requires a lot of extra memory. You'll see both approaches used depending on whose implementation you're looking at.
Guiding with classifier models can give you images of a specific class. Guiding with a model like CLIP can help better match a text prompt. Guiding with a style loss can help add a particular style. Guiding with some sort of perceptual loss can force it towards the overall look af a target image. And so on.
##Conclusions
Hopefully now you have a slightly better idea of what is happening when you make an image with one of these models, and how you can modify the process in creative ways. I hope you're inspired to make something fun :)
This notebook was written by Jonathan Whitaker, adapted from [Grokking Stable Diffusion](https://colab.research.google.com/drive/1dlgggNa5Mz8sEAGU0wFCHhGLFooW_pf1?usp=sharing) which was my early attempts to understand these components for myself. If you spot bugs or have questions, feel free to reach out to me @johnowhitaker :) Enjoy! | diffusion-models-class/units/en/unit3/3.mdx/0 | {
"file_path": "diffusion-models-class/units/en/unit3/3.mdx",
"repo_id": "diffusion-models-class",
"token_count": 20868
} | 143 |
# Sprint ControlNet en JAX/Diffusers
Bienvenue au sprint communautaire en JAX/Diffusers ! L'objectif de ce sprint est de travailler sur des modèles de diffusion amusants et créatifs en utilisant JAX et Diffusers.
Lors de cet événement, nous créerons diverses applications avec des modèles de diffusion en JAX/Flax et Diffusers en utilisant des heures TPU gratuites généreusement fournies par Google Cloud.
Ce document présente toutes les informations importantes pour faire une soumission au sprint.
## Organisation
Les participants peuvent proposer des idées pour un projet intéressant impliquant des modèles de diffusion. Des équipes de 3 à 5 personnes seront ensuite formées autour des projets les plus prometteurs et les plus intéressants. Assurez-vous de lire la section Communication pour savoir comment proposer des projets, commenter les idées de projet des autres participants et créer une équipe.
Pour aider chaque équipe à mener à bien son projet, nous organiserons des conférences données par des scientifiques et des ingénieurs de Google, de Hugging Face et de la communauté open source. Les conférences auront lieu le 17 avril. Assurez-vous d'assister aux conférences pour tirer le meilleur parti de votre participation ! Consultez la section Conférences pour avoir une vue d'ensemble des conférences, y compris l'orateur et l'heure de la conférence.
Chaque équipe bénéficiera ensuite d'un **accès gratuit à une VM TPU v4-8** du 14 avril au 1er mai. De plus, nous fournirons un exemple d'entraînement en JAX/Flax et Diffusers pour entraîner un [ControlNet](https://huggingface.co/blog/controlnet) afin de lancer votre projet. Nous fournirons également des exemples sur la façon de préparer les jeux de données. Pendant le sprint, nous nous assurerons de répondre à toutes les questions que vous pourriez avoir sur JAX/Flax et Diffusers et nous aiderons chaque équipe autant que possible !
> Nous ne distribuerons pas de TPU pour les équipes composées d'un seul membre. Nous vous encourageons donc à rejoindre une équipe ou à trouver des coéquipiers pour votre idée.
À la fin du sprint, chaque soumission sera évaluée par un jury et les trois meilleures démonstrations recevront un prix. Consultez la section Comment soumettre une démo pour plus d'informations et de suggestions sur la manière de soumettre votre projet.
> 💡 Note : Même si nous fournissons un exemple pour entraîner ControlNet, les participants peuvent proposer des idées qui n'impliquent pas du tout un ControlNet du moment qu'elles sont centrées sur les modèles de diffusion.
## Dates importantes
- **29/03** Annonce officielle de la semaine de la communauté.
- **31/03** Commencez à former des groupes dans le canal #jax-diffusers-ideas sur Discord.
- **10/04** Collecte des données.
- **13/04 & 14/04 & 17/04** [Conférences de lancement sur YouTube](https://www.youtube.com/watch?v=SOj2sxgvFe0).
- **14/04 à 17/04.** Début de l'accès aux TPU.
- **01/05** Fermeture de l'accès aux TPU.
- **08/05** : Annonce des 10 meilleurs projets et des prix.
> 💡 Note : Nous accepterons les candidatures tout au long du sprint.
## Communication
Toutes les communications importantes auront lieu sur notre serveur Discord. Rejoignez le serveur en utilisant [ce lien] (https://hf.co/join/discord). Après avoir rejoint le serveur, prenez le rôle Diffusers dans le canal `#role-assignment` et dirigez-vous vers le canal `#jax-diffusers-ideas` pour partager votre idée sous la forme d'un message de forum. Pour vous inscrire, remplissez le formulaire d'inscription et nous vous donnerons accès à deux canaux Discord supplémentaires pour les discussions et le support technique, ainsi qu'un accès aux TPU.
Les annonces importantes de l'équipe Hugging Face, Flax/JAX et Google Cloud seront publiées sur le serveur.
Le serveur Discord sera le lieu central où les participants pourront publier leurs résultats, partager leurs expériences d'apprentissage, poser des questions et obtenir une assistance technique pour les divers obstacles qu'ils rencontrent.
Pour les problèmes liés à Flax/JAX, Diffusers, Datasets ou pour des questions spécifiques à votre projet, nous interagirons à travers les dépôts publics et les forums :
- Flax: [Issues](https://github.com/google/flax/issues), [Questions](https://github.com/google/flax/discussions)
- JAX: [Issues](https://github.com/google/jax/issues), [Questions](https://github.com/google/jax/discussions)
- 🤗 Diffusers: [Issues](https://github.com/huggingface/diffusers/issues), [Questions](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63)
- 🤗 Datasets: [Issues](https://github.com/huggingface/datasets/issues), [Questions](https://discuss.huggingface.co/c/datasets/10)
- Questions spécifiques aux projets : Elles peuvent être posées sur le canal #jax-diffusers-ideas sur Discord.
- Questions relatives au TPU : Canal `#jax-diffusers-tpu-support` sur Discord.
- Discussion générale : `#jax-diffusers-sprint channel` sur Discord.
Vous aurez accès aux canaux `#jax-diffusers-tpu-support` et `#jax-diffusers-sprint` une fois que vous aurez été accepté pour participer au sprint.
Lorsque vous demandez de l'aide, nous vous encourageons à poster le lien vers le [forum](https://discuss.huggingface.co) sur le serveur Discord, plutôt que de poster directement des *issues* ou des questions.
De cette façon, nous nous assurons que tout le monde peut bénéficier de vos questions, même après la fin du sprint.
> Note : Après le 10 avril, si vous vous êtes inscrit sur le formulaire Google, mais que vous n'êtes pas dans le canal Discord, veuillez laisser un message sur [l'annonce officielle du forum](https://discuss.huggingface.co/t/controlling-stable-diffusion-with-jax-and-diffusers-using-v4-tpus/35187/2) et envoyer un ping à `@mervenoyan`, `@sayakpaul`, et `@patrickvonplaten`. Il se peut que nous prenions un jour pour traiter ces demandes.
## Conférences
Nous avons invité d'éminents chercheurs et ingénieurs de Google, Hugging Face, et de la communauté open-source qui travaillent dans le domaine de l'IA générative. Nous mettrons à jour cette section avec des liens vers les conférences, alors gardez un œil ici ou sur Discord dans le canal diffusion models core-announcements et programmez vos rappels !
### **13 avril 2023**
| Intervenant | Sujet | Horaire | Video |
|---|---|---|---|
[Emiel Hoogeboom, Google Brain](https://twitter.com/emiel_hoogeboom?lang=en) | Pixel-Space Diffusion models for High Resolution Images | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [](https://www.youtube.com/watch?v=iw2WCAGxdQ4) |
| [Apolinário Passos, Hugging Face](https://twitter.com/multimodalart?lang=en) | Introduction to Diffusers library | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [](https://www.youtube.com/watch?v=iw2WCAGxdQ4)
| [Ting Chen, Google Brain](https://twitter.com/tingchenai?lang=en) | Diffusion++: discrete data and high-dimensional generation | 5.45pm-6.25pm CEST / 08.45am-09.25am PST | [](https://www.youtube.com/watch?v=iw2WCAGxdQ4) |
### **14 avril 2023**
| Intervenant | Sujet | Horaire | Video |
|---|---|---|---|
| [Tim Salimans, Google Brain](https://twitter.com/timsalimans?lang=en) | Efficient image and video generation with distilled diffusion models | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) |
| [Suraj Patil, Hugging Face](https://twitter.com/psuraj28?lang=en) | Masked Generative Models: MaskGIT/Muse | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) |
| [Sabrina Mielke, John Hopkins University](https://twitter.com/sjmielke?lang=en) | From stateful code to purified JAX: how to build your neural net framework | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) |
### **17 avril 2023**
| Intervenant | Sujet | Horaire | Video |
|---|---|---|---|
| [Andreas Steiner, Google Brain](https://twitter.com/AndreasPSteiner) | JAX & ControlNet | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [](https://www.youtube.com/watch?v=SOj2sxgvFe0) |
| [Boris Dayma, craiyon](https://twitter.com/borisdayma?lang=en) | DALL-E Mini | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [](https://www.youtube.com/watch?v=SOj2sxgvFe0) |
| [Margaret Mitchell, Hugging Face](https://twitter.com/mmitchell_ai?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor) | Ethics of Text-to-Image | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [](https://www.youtube.com/watch?v=SOj2sxgvFe0) |
## Données et prétraitement
Dans cette section, nous verrons comment construire votre propre jeu de données pour entraîner ControlNet.
### Préparer un grand jeu de données local
#### Monter un disque
Si vous avez besoin d'espace supplémentaire, vous pouvez suivre [ce guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#prerequisites) pour créer un disque persistant, l'attacher à votre VM TPU et créer un répertoire pour monter le disque. Vous pouvez ensuite utiliser ce répertoire pour stocker votre jeu de données.
Par ailleurs, la VM TPU attribuée à votre équipe dispose d'un disque de stockage persistant de 3 To. Pour apprendre à l'utiliser, consultez [ce guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#mount-pd).
#### Prétraitement des données
Nous montrons ici comment préparer un grand jeu de données pour entraîner un modèle ControlNet avec filtre de Canny. Plus précisément, nous fournissons un [exemple de script](./dataset_tools/coyo_1m_dataset_preprocess.py) qui :
* Sélectionne 1 million de paires image-texte à partir d'un jeu de données existant [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m).
* Télécharge chaque image et utilise le filtre de Canny pour générer l'image de conditionnement.
* Crée un métafichier qui relie toutes les images et les images traitées à leurs légendes.
Utilisez la commande suivante pour exécuter le script de prétraitement des données de l'exemple. Si vous avez monté un disque sur votre TPU, vous devez placer vos fichiers `train_data_dir` et `cache_dir` sur le disque monté.
```py
python3 coyo_1m_dataset_preprocess.py \
--train_data_dir="/mnt/disks/persist/data" \
--cache_dir="/mnt/disks/persist" \
--max_train_samples=1000000 \
--num_proc=16
```
Une fois le script exécuté, vous trouverez un dossier de données dans le répertoire `train_data_dir` spécifié avec la structure de dossier ci-dessous :
```py
data
├── images
│ ├── image_1.png
│ ├── .......
│ └── image_1000000.jpeg
├── processed_images
│ ├── image_1.png
│ ├── .......
│ └── image_1000000.jpeg
└── meta.jsonl
```
#### Charger un jeu de données
Pour charger un jeu de données à partir du dossier de données que vous venez de créer, vous devez ajouter un script de chargement de jeu de données à votre dossier de données. Le script de chargement de données doit porter le même nom que le dossier. Par exemple, si votre dossier de données est `data`, vous devez ajouter un script de chargement de données nommé `data.py`. Nous fournissons un [exemple de script de chargement de données](./dataset_tools/data.py) que vous pouvez utiliser. Tout ce que vous avez à faire est de mettre à jour le `DATA_DIR` avec le chemin correct de votre dossier de données. Pour plus de détails sur l'écriture d'un script de chargement de données, reportez-vous à la [documentation] (https://huggingface.co/docs/datasets/dataset_script).
Une fois que le script de chargement de données est ajouté à votre dossier de données, vous pouvez le charger avec :
```py
dataset = load_dataset("/mnt/disks/persist/data", cache_dir="/mnt/disks/persist" )
```
Notez que vous pouvez utiliser `--train_data_dir` pour passer le répertoire de votre dossier de données au script d'entraînement et générer votre jeu de données automatiquement pendant l'entraînement.
Pour les grands jeux de données, nous recommandons de générer le jeu de données une seule fois et de le sauvegarder sur le disque à l'aide de la commande
```py
dataset.save_to_disk("/mnt/disks/persist/dataset")
```
Vous pouvez ensuite réutiliser le jeu de données sauvegardé pour votre entraînement en passant `--load_from_disk`.
Voici un exemple d'exécution d'un script d'entraînement qui chargera le jeu de données depuis le disque.
```py
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="/mnt/disks/persist/canny_model"
export DATASET_DIR="/mnt/disks/persist/dataset"
export DISK_DIR="/mnt/disks/persist"
python3 train_controlnet_flax.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--train_data_dir=$DATASET_DIR \
--load_from_disk \
--cache_dir=$DISK_DIR \
--resolution=512 \
--learning_rate=1e-5 \
--train_batch_size=2 \
--revision="non-ema" \
--from_pt \
--max_train_steps=500000 \
--checkpointing_steps=10000 \
--dataloader_num_workers=16
```
### Préparer un jeu de données avec MediaPipe et Hugging Face
Nous fournissons un *notebook* ([ ](https://colab.research.google.com/github/huggingface/community-events/blob/main/jax-controlnet-sprint/dataset_tools/create_pose_dataset.ipynb)) qui vous montre comment préparer un jeu de données pour entraîner ControlNet en utilisant [MediaPipe](https://developers.google.com/mediapipe) et Hugging Face. Plus précisément, dans le *notebook*, nous montrons :
* Comment tirer parti des solutions MediaPipe pour extraire les articulations du corps de la pose à partir des images d'entrée.
* Prédire les légendes en utilisant BLIP-2 à partir des images d'entrée en utilisant 🤗 Transformers.
* Construire et pousser le jeu de données final vers le Hugging Face Hub en utilisant 🤗 Datasets.
Vous pouvez vous référer au *notebook* pour créer vos propres jeux de données en utilisant d'autres solutions MediaPipe. Ci-dessous, nous listons toutes les solutions pertinentes :
* [Détection des points de repère de pose](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker)
* [Détection de repères de visage](https://developers.google.com/mediapipe/solutions/vision/face_landmarker)
* [Segmentation de selfie](https://developers.google.com/mediapipe/solutions/vision/image_segmenter)
## Entraîner ControlNet
C'est peut-être la partie la plus amusante et la plus intéressante de ce document, car nous vous montrons ici comment entraîner un modèle ControlNet personnalisé.
> 💡 Note : Pour ce sprint, vous n'êtes PAS limité à entraîner des ControlNets. Nous fournissons ce script d'entraînement comme référence pour vous permettre de démarrer.
Pour un entraînement plus rapide sur les TPU et les GPU, vous pouvez tirer parti de l'exemple d'entraînement Flax. Suivez les instructions ci-dessus pour obtenir le modèle et le jeu de données avant d'exécuter le script.
### Mise en place de la VM TPU
Avant de continuer avec le reste de cette section, vous devez vous assurer que l'adresse email que vous utilisez a été ajoutée au projet `hf-flax` sur Google Cloud Platform. Si ce n'est pas le cas, merci de nous le faire savoir sur le serveur Discord (vous pouvez taguer `@sayakpaul`, `@merve`, et `@patrickvonplaten`).
Dans ce qui suit, nous allons décrire comment le faire en utilisant une console standard, mais vous devriez également être en mesure de vous connecter à la VM TPU via des IDE, comme Visual Studio Code, etc.
1. Vous devez installer le [Google Cloud SDK](https://cloud.google.com/sdk/docs/install). Veuillez suivre les instructions sur https://cloud.google.com/sdk.
2. Une fois le Google Cloud SDK installé, vous devez configurer votre compte en exécutant la commande suivante. Assurez-vous que <votre-adresse-email> correspond à l'adresse gmail que vous avez utilisée pour vous inscrire à cet événement.
```bash
gcloud config set account <your-email-adress>
```
3. Assurons-nous également que le bon projet est défini au cas où votre email serait utilisé pour plusieurs projets gcloud :
```bash
gcloud config set project hf-flax
```
4. Ensuite, vous devez vous authentifier. Vous pouvez le faire en exécutant la commande
```bash
gcloud auth login
```
Vous devriez obtenir un lien vers un site web où vous pouvez authentifier votre compte gmail.
5. Enfin, vous pouvez établir un tunnel SSH dans la VM TPU ! Veuillez exécuter la commande suivante en réglant la "--zone" sur `us-central2-b` et sur le nom de la TPU qui vous a été envoyé par email par l'équipe de Hugging Face.
```bash
gcloud alpha compute tpus tpu-vm ssh <tpu-name> --zone <zone> --project hf-flax
```
Cela devrait établir un tunnel SSH dans la VM TPU !
> 💡 Note : Vous n'êtes PAS supposé avoir accès à la console Google Cloud. Aussi, il se peut que vous ne receviez pas de lien d'invitation pour rejoindre le projet `hf-flax`. Mais vous devriez tout de même pouvoir accéder à la VM TPU en suivant les étapes ci-dessus .
> Note : Les VM TPU sont déjà attachées à des disques de stockage persistants (de 3 TB). Cela sera utile
au cas où votre équipe souhaiterait entraîner localement un jeu de données volumineux. Le nom du disque de stockage devrait également figurer dans l'e-mail que vous avez reçu. Suivez [cette section](https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint#mount-a-disk) pour plus de détails.
### Installation de JAX
Commençons par créer un environnement virtuel Python :
```bash
python3 -m venv <your-venv-name>
```
Nous pouvons activer l'environnement en lançant :
```bash
source ~/<your-venv-name>/bin/activate
```
Installez ensuite Diffusers et les dépendances d'entraînement de la bibliothèque :
```bash
pip install git+https://github.com/huggingface/diffusers.git
```
Ensuite, clonez ce dépôt et installez JAX, Flax et les autres dépendances :
```bash
git clone https://github.com/huggingface/community-events
cd community-events/jax-controlnet-sprint/training_scripts
pip install -U -r requirements_flax.txt
```
Pour vérifier que JAX a été correctement installé, vous pouvez exécuter la commande suivante :
```py
import jax
jax.device_count()
```
Cela devrait afficher le nombre de cœurs de la TPU, qui devrait être de 4 sur une VM TPUv4-8. Si Python n'est pas capable de détecter le périphérique TPU, veuillez consulter la section des erreurs possibles plus bas pour des solutions.
Si vous souhaitez utiliser le logging Weights and Biases, vous devez également installer `wandb` maintenant :
```bash
pip install wandb
```
> 💡 Note : Weights & Biases est gratuit pour les étudiants, les éducateurs et les chercheurs universitaires. Tous les participants à notre événement sont qualifiés pour obtenir un compte d'équipe académique Weights & Biases. Pour créer votre équipe, vous pouvez visiter le site https://wandb.ai/create-team et choisir le type d'équipe "*Academic*". Pour plus d'informations sur la création et la gestion d'une équipe Weights & Biases, vous pouvez consulter le site https://docs.wandb.ai/guides/app/features/teams.
### Exécution du script d'entraînement
Maintenant, téléchargeons deux images de conditionnement que nous utiliserons pour lancer la validation pendant l'entraînement afin de suivre nos progrès
```bash
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```
Nous vous encourageons à stocker ou à partager votre modèle avec la communauté. Pour utiliser le Hub, veuillez vous connecter à votre compte Hugging Face, ou ([en créer un](https://huggingface.co/docs/diffusers/main/en/training/hf.co/join) si vous n'en avez pas déjà un) :
```bash
huggingface-cli login
```
Assurez-vous que les variables d'environnement `MODEL_DIR`, `OUTPUT_DIR` et `HUB_MODEL_ID` sont définies. Les variables `OUTPUT_DIR` et `HUB_MODEL_ID` spécifient où sauvegarder le modèle sur le Hub :
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="runs/fill-circle-{timestamp}"
export HUB_MODEL_ID="controlnet-fill-circle"
```
Et enfin, démarrez l'entraînement (assurez-vous d'être dans le répertoire `jax-controlnet-sprint/training_scripts`) !
```bash
python3 train_controlnet_flax.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--validation_steps=1000 \
--train_batch_size=2 \
--revision="non-ema" \
--from_pt \
--report_to="wandb" \
--tracker_project_name=$HUB_MODEL_ID \
--num_train_epochs=11 \
--push_to_hub \
--hub_model_id=$HUB_MODEL_ID
```
Notez que l'argument `--from_pt` convertira votre point de contrôle pytorch en flax. Cependant, il ne fonctionnera qu'avec les points de contrôle au format diffusers. Si votre `MODEL_DIR` ne contient pas de points de contrôle au format diffusers, vous ne pouvez pas utiliser l'argument `--from_pt`. Vous pouvez convertir vos points de contrôle `ckpt` ou `safetensors` au format diffusers en utilisant [ce script] (https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py).
Puisque nous avons passé l'argument `--push_to_hub`, il va automatiquement créer un repo de modèle sous votre compte Hugging Face basé sur `$HUB_MODEL_ID`. À la fin de l'entraînement, le point de contrôle final sera automatiquement stocké sur le Hub. Vous pouvez trouver un exemple de modèle [ici](https://huggingface.co/YiYiXu/fill-circle-controlnet).
Notre script d'entraînement fournit également un support limité pour le streaming de grands jeux de données à partir du Hub. Afin d'activer le streaming, il faut également définir `--max_train_samples`. Voici un exemple de commande (tiré de [cet article de blog](https://huggingface.co/blog/train-your-controlnet)) :
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="runs/uncanny-faces-{timestamp}"
export HUB_MODEL_ID="controlnet-uncanny-faces"
python3 train_controlnet_flax.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=multimodalart/facesyntheticsspigacaptioned \
--streaming \
--conditioning_image_column=spiga_seg \
--image_column=image \
--caption_column=image_caption \
--resolution=512 \
--max_train_samples 100000 \
--learning_rate=1e-5 \
--train_batch_size=1 \
--revision="flax" \
--report_to="wandb" \
--tracker_project_name=$HUB_MODEL_ID
```
Notez cependant que les performances des TPUs peuvent être limitées car le streaming avec `datasets` n'est pas optimisé pour les images. Pour assurer un débit maximal, nous vous encourageons à explorer les options suivantes :
* [Webdataset](https://webdataset.github.io/webdataset/)
* [TorchData](https://github.com/pytorch/data)
* [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds)
Lorsque vous travaillez avec un jeu de données plus important, vous pouvez avoir besoin d'exécuter le processus d'entraînement pendant une longue période et il est utile d'enregistrer des points de contrôle réguliers au cours du processus. Vous pouvez utiliser l'argument suivant pour activer les points de contrôle intermédiaires :
```bash
--checkpointing_steps=500
```
Cela permet d'enregistrer le modèle entraîné dans des sous-dossiers du dossier output_dir. Le nom des sous-dossiers correspond au nombre d'étapes effectuées jusqu'à présent ; par exemple : un point de contrôle sauvegardé après 500 étapes d'entraînement serait sauvegardé dans un sous-dossier nommé 500
Vous pouvez alors commencer votre entraînement à partir de ce point de contrôle sauvegardé avec
```bash
--controlnet_model_name_or_path="./control_out/500"
```
Nous soutenons l'entraînement avec la stratégie de pondération Min-SNR proposée dans [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) qui permet d'obtenir une convergence plus rapide en rééquilibrant la perte. Pour l'utiliser, il faut définir l'argument `--snr_gamma`. La valeur recommandée est `5.0`.
Nous supportons également l'accumulation de gradient, technique qui vous permet d'utiliser une taille de batch plus grande que celle que votre machine serait normalement capable de mettre en mémoire. Vous pouvez utiliser l'argument `gradient_accumulation_steps` pour définir les étapes d'accumulation du gradient. L'auteur de ControlNet recommande d'utiliser l'accumulation de gradient pour obtenir une meilleure convergence. Pour en savoir plus voir [ici](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation).
Vous pouvez **profiler votre code** avec :
```bash
--profile_steps==5
```
Reportez-vous à la [documentation JAX sur le profilage](https://jax.readthedocs.io/en/latest/profiling.html). Pour inspecter la trace de profil, vous devez installer et démarrer Tensorboard avec le plugin de profil :
```bash
pip install tensorflow tensorboard-plugin-profile
tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
```
Le profil peut alors être inspecté à l'adresse http://localhost:6006/#profile.
Parfois vous obtiendrez des conflits de version (messages d'erreur comme `Duplicate plugins for name projector`), ce qui signifie que vous devez désinstaller et réinstaller toutes les versions de Tensorflow/Tensorboard (par exemple avec `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`).
Notez que la fonctionnalité de débogage du plugin Tensorboard `profile` est toujours en cours de développement. Toutes les vues ne sont pas entièrement fonctionnelles, et par exemple le `trace_viewer` coupe les événements après 1M (ce qui peut résulter en la perte de toutes vos traces de périphériques si par exemple vous profilez l'étape de compilation par accident).
### Dépannage de votre VM TPU
**TRES IMPORTANT** : Un seul processus peut accéder aux cœurs de la TPU à la fois. Cela signifie que si plusieurs membres de l'équipe essaient de se connecter aux cœurs de la TPU, vous obtiendrez des erreurs telles que :
```
libtpu.so already in used by another process. Not attempting to load libtpu.so in this process.
```
Nous recommandons à chaque membre de l'équipe de créer son propre environnement virtuel, mais une seule personne devrait exécuter les processus d'entraînement lourds. De plus, veuillez vous relayer lors de l'installation de la TPUv4-8 afin que tout le monde puisse vérifier que JAX est correctement installé.
Si les membres de votre équipe n'utilisent pas actuellement la TPU mais que vous obtenez toujours ce message d'erreur. Vous devez tuer le processus qui utilise la TPU avec :
```
kill -9 PID
```
vous devrez remplacer le terme "PID" par le PID du processus qui utilise TPU. Dans la plupart des cas, cette information est incluse dans le message d'erreur. Par exemple, si vous obtenez
```
The TPU is already in use by a process with pid 1378725. Not attempting to load libtpu.so in this process.
```
vous pouvez faire
```
kill -9 1378725
```
Vous pouvez également utiliser la commande suivante pour trouver les processus utilisant chacune des puces TPU (par exemple, `/dev/accel0` est l'une des puces TPU)
```
sudo lsof -w /dev/accel0
```
Pour tuer tous les processus à l'aide de `/dev/accel0`, il faut
```
sudo lsof -t /dev/accel0 | xargs kill -9
```
Si Python n'est pas capable de détecter votre périphérique TPU (i.e. quand vous faites `jax.device_count()` et qu'il sort `0`), cela peut être dû au fait que vous n'avez pas les droits d'accès aux logs tpu, ou que vous avez un fichier tpu lock qui traîne. Exécutez les commandes suivantes pour résoudre le problème
```
sudo rm -f /tmp/libtpu_lockfile
```
```
sudo chmod o+w /tmp/tpu_logs/
```
## Comment faire une soumission
Pour faire une soumission complète, vous devez avoir les éléments suivants sur le Hub d'Hugging Face :
- Un dépôt de modèle avec les poids du modèle et la carte du modèle,
- (Facultatif) Un dépôt de jeu de données avec une carte de jeu de données,
- Un *Space* qui permet aux autres d'interagir avec votre modèle.
### Pousser les poids du modèle et la carte du modèle vers le Hub
**Si vous utilisez le script d'entraînement (`train_controlnet_flax.py`) fourni dans ce répertoire**
L'activation de l'argument `push_to_hub` dans les arguments d'entraînement va :
- Créer un dépôt de modèles localement et à distance sur le Hub,
- Créer une carte de modèle et l'écrire dans le dépôt de modèles local,
- Sauvegarder votre modèle dans le référentiel de modèles local,
- Pousser le dépôt local vers le Hub.
Votre carte de modèle générée automatiquement ressemblera à ceci :
.
Vous pouvez modifier la carte de modèle pour qu'elle soit plus informative. Les cartes de modèle qui sont plus informatives que les autres auront plus de poids lors de l'évaluation.
**Si vous avez entraîné un modèle personnalisé et que vous n'avez pas utilisé le script**
Vous devez vous authentifier avec `huggingface-cli login` comme indiqué ci-dessus. Si vous utilisez une des classes de modèles disponibles dans `diffusers`, sauvegardez votre modèle avec la méthode `save_pretrained` de votre modèle.
```py
model.save_pretrained("path_to_your_model_repository")
```
Après avoir sauvegardé votre modèle dans un dossier, vous pouvez simplement utiliser le script ci-dessous pour pousser votre modèle vers le Hub :
```py
from huggingface_hub import create_repo, upload_folder
create_repo("username/my-awesome-model")
upload_folder(
folder_path="path_to_your_model_repository",
repo_id="username/my-awesome-model"
)
```
Ceci poussera votre modèle vers le Hub. Une fois le modèle poussé, vous devez créer la carte de modèle vous-même.
Vous pouvez utiliser l'interface graphique pour l'éditer.

Chaque carte de modèle se compose de deux sections, les métadonnées et le texte libre. Vous pouvez éditer les métadonnées à partir des sections dans l'interface graphique. Si vous avez sauvegardé votre modèle en utilisant `save_pretrained`, vous n'avez pas besoin de fournir `pipeline_tag` et `library_name`. Sinon, fournissez `pipeline_tag`, `library_name` et le jeu de données s'il existe sur Hugging Face Hub. En plus de cela, vous devez ajouter `jax-diffusers-event` à la section `tags`.
```
---
license: apache-2.0
library_name: diffusers
tags:
- jax-diffusers-event
datasets:
- red_caps
pipeline_tag: text-to-image
---
```

### Créer notre *Space*
<h4> Rédiger notre application </h4>
Nous utiliserons [Gradio](https://gradio.app/) pour créer nos applications. Gradio possède deux API principales : `Interface` et `Blocks`. `Interface` est une API de haut niveau qui vous permet de créer une interface avec quelques lignes de code, et `Blocks` est une API de plus bas niveau qui vous donne plus de flexibilité sur les interfaces que vous pouvez construire. Le code doit être inclus dans un fichier appelé `app.py`.
Essayons de créer une application ControlNet comme exemple. L'API `Interface` fonctionne simplement comme suit :
```py
import gradio as gr
# La fonction d'inférence prend en compte le prompt, le prompt négatif et l'image
def infer(prompt, negative_prompt, image):
# implémentez votre fonction d'inférence ici
return output_image
# vous devez passer les entrées et les sorties en fonction de la fonction d'inférence
gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image").launch()
```
Vous pouvez personnaliser votre interface en passant `title`, `description` et `examples` à la fonction `Interface`.
```py
title = "ControlNet on Canny Filter"
description = "This is a demo on ControlNet based on canny filter."
# vous devez passer vos exemples en fonction de vos entrées
# chaque liste intérieure est un exemple, chaque élément de la liste correspondant à un composant des `inputs`.
examples = [["a cat with cake texture", "low quality", "cat_image.png"]]
gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image",
title = title, description = description, examples = examples, theme='gradio/soft').launch()
```
Votre interface ressemblera à ceci :

Avec `Blocks`, vous pouvez ajouter du Markdown, des onglets, des composants organisés en colonnes et en lignes, etc. Supposons que nous ayons deux ControlNets et que nous voulions les inclure dans un *Space*. Nous les placerons sous différents onglets dans une démo comme ci-dessous :
```py
import gradio as gr
def infer_segmentation(prompt, negative_prompt, image):
# votre fonction d'inférence pour le contrôle de la segmentation
return im
def infer_canny(prompt, negative_prompt, image):
    # votre fonction d'inférence pour le contrôle canny
return im
with gr.Blocks(theme='gradio/soft') as demo:
gr.Markdown("## Stable Diffusion with Different Controls")
gr.Markdown("In this app, you can find different ControlNets with different filters. ")
with gr.Tab("ControlNet on Canny Filter "):
prompt_input_canny = gr.Textbox(label="Prompt")
negative_prompt_canny = gr.Textbox(label="Negative Prompt")
canny_input = gr.Image(label="Input Image")
canny_output = gr.Image(label="Output Image")
submit_btn = gr.Button(value = "Submit")
canny_inputs = [prompt_input_canny, negative_prompt_canny, canny_input]
submit_btn.click(fn=infer_canny, inputs=canny_inputs, outputs=[canny_output])
with gr.Tab("ControlNet with Semantic Segmentation"):
prompt_input_seg = gr.Textbox(label="Prompt")
negative_prompt_seg = gr.Textbox(label="Negative Prompt")
seg_input = gr.Image(label="Image")
seg_output = gr.Image(label="Output Image")
submit_btn = gr.Button(value = "Submit")
seg_inputs = [prompt_input_seg, negative_prompt_seg, seg_input]
submit_btn.click(fn=infer_segmentation, inputs=seg_inputs, outputs=[seg_output])
demo.launch()
```
La démo ci-dessus ressemblera à ce qui suit :

#### Créer notre *Space*
Une fois notre application écrite, nous pouvons créer un espace Hugging Face pour héberger notre application. Vous pouvez aller sur [huggingface.co](http://huggingface.co), cliquer sur votre profil en haut à droite et sélectionner "*New Space*".

Nous pouvons nommer notre *Space*, choisir une licence et sélectionner "Gradio" comme Space SDK.

Après avoir créé le *Space*, vous pouvez soit utiliser les instructions ci-dessous pour cloner le dépôt localement, ajouter vos fichiers et pousser, soit utiliser l'interface graphique pour créer les fichiers et écrire le code dans le navigateur.

Pour téléverser votre fichier d'application (`app.py`), cliquez sur "*Add File*" et faites glisser votre fichier.

Enfin, nous devons créer un fichier appelé `requirements.txt` et y ajouter les dépendances requises pour notre projet. Assurez-vous d'y indiquer les versions de jax, diffusers et des autres dépendances comme ci-dessous.
```
-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
jax[cuda11_cudnn805]
jaxlib
git+https://github.com/huggingface/diffusers@main
opencv-python
transformers
flax
```
Nous vous accorderons une dotation GPU afin que votre application puisse fonctionner sur GPU.
Nous avons un classement hébergé [ici](https://huggingface.co/spaces/jax-diffusers-event/leaderboard) et nous distribuerons des prix à partir de ce classement. Pour que votre *Space* apparaisse sur le classement, éditez simplement le `README.md` de votre *Space* pour avoir le tag `jax-diffusers-event` sous les tags comme ci-dessous :
```
---
title: Canny Coyo1m
emoji: 💜
...
tags:
- jax-diffusers-event
---
```
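À titre indicatif, vous pouvez aussi ajouter ce tag par programmation avec la fonction `metadata_update` de `huggingface_hub` (l'identifiant du *Space* ci-dessous est un exemple à adapter) :
```py
from huggingface_hub import metadata_update

metadata_update(
    "username/my-awesome-space",  # identifiant d'exemple
    {"tags": ["jax-diffusers-event"]},
    repo_type="space",
    overwrite=True,  # selon les métadonnées déjà présentes, ce drapeau peut être nécessaire
)
```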
## Prix
Pour ce sprint, nous aurons de nombreux prix. Nous choisirons les dix premiers projets de [ce classement](https://huggingface.co/spaces/jax-diffusers-event/leaderboard), vous devez donc tagger votre *Space* pour le classement afin que votre soumission soit complète, comme indiqué dans la section ci-dessus. Les projets sont classés en fonction du nombre de j'aime ; nous partagerons donc vos Spaces afin d'en augmenter la visibilité et de permettre aux gens de voter en laissant un j'aime sur votre Space. Nous sélectionnerons les dix premiers projets du classement et le jury votera pour déterminer les trois premières places. Ces projets seront mis en valeur par Google et Hugging Face. Les interfaces élaborées ainsi que les projets dont les bases de code et les modèles sont en libre accès augmenteront probablement les chances de gagner des prix.
Les prix sont les suivants et sont remis à chaque membre de l'équipe :
**Première place** : Un bon d'achat de 150 $ à dépenser sur le [*Hugging Face Store*](https://store.huggingface.co/), un abonnement d'un an à Hugging Face Hub PRO, le livre *Natural Language Processing with Transformers*.
**Deuxième place** : Un bon d'achat de 125$ à dépenser sur le [*Hugging Face Store*](https://store.huggingface.co/), un abonnement d'un an à Hugging Face Hub PRO.
**Troisième place** : Un bon d'achat de 100 $ à dépenser sur le [*Hugging Face Store*](https://store.huggingface.co/), un abonnement d'un an à Hugging Face Hub PRO.
Les dix premiers projets du classement (indépendamment de la décision du jury) gagneront un kit de merch exclusivement conçu pour ce sprint par Hugging Face, ainsi qu'un kit de merch séparé JAX de Google.
## Jury
Le jury de ce sprint était composé des personnes suivantes :
1. Robin Rombach, Stability AI
2. Huiwen Chang, Google Research
3. Jun-Yan Zhu, Carnegie Mellon University
4. Merve Noyan, Hugging Face
## FAQ
Dans cette section, nous rassemblons les réponses aux questions fréquemment posées sur notre canal discord.
### Comment utiliser VSCode avec TPU VM ?
Vous pouvez suivre ce [guide général](https://medium.com/@ivanzhd/vscode-sftp-connection-to-compute-engine-on-google-cloud-platform-gcloud-9312797d56eb) sur la façon d'utiliser VSCode remote pour se connecter à Google Cloud VMs. Une fois que c'est configuré, vous pouvez développer sur la VM TPU en utilisant VSCode.
Pour obtenir votre IP externe, utilisez cette commande :
```
gcloud compute tpus tpu-vm describe <node_name> --zone=<zone>
```
Elle devrait être listée sous 'accessConfig' -> 'externalIp'
### Comment tester votre code localement ?
Puisque les membres de l'équipe partagent la VM TPU, il peut être pratique d'écrire et de tester votre code localement sur CPU pendant que vos coéquipiers exécutent le processus d'entraînement sur la VM. Pour effectuer des tests locaux, il est important de mettre le drapeau `xla_force_host_platform_device_count` à `4`, comme dans l'exemple ci-dessous. Pour en savoir plus, consultez la [documentation](https://jax.readthedocs.io/en/latest/jax-101/06-parallelism.html#aside-hosts-and-devices-in-jax).
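Voici, à titre indicatif, à quoi peut ressembler ce réglage (le drapeau doit être défini avant le premier `import jax`) :
```py
import os

# Simule 4 périphériques sur CPU pour tester `pmap`/`pjit` localement
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=4"

import jax

print(jax.devices())
```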
## Gagnants du sprint
Les 10 meilleurs projets (basés sur le nombre de likes sur leurs démos) sont disponibles sur ce [classement](https://huggingface.co/spaces/jax-diffusers-event/leaderboard). Nous avons soumis ce classement à notre jury pour qu'il juge les 10 meilleurs projets sur la base de plusieurs facteurs tels que les points de contrôle du modèle, les jeux de données et les bases de code open-source, l'exhaustivité des cartes de modèle et de jeu de données, etc. En conséquence, les trois projets suivants sont sortis vainqueurs :
1. [ControlNet pour la décoration intérieure](https://huggingface.co/spaces/controlnet-interior-design/controlnet-seg)
2. [ControlNet pour le réglage de la luminosité](https://huggingface.co/spaces/ioclab/brightness-controlnet)
3. [Stable Diffusion avec contrôle manuel](https://huggingface.co/spaces/vllab/controlnet-hands) | diffusion-models-class/units/fr/events/4.mdx/0 | {
"file_path": "diffusion-models-class/units/fr/events/4.mdx",
"repo_id": "diffusion-models-class",
"token_count": 15277
} | 144 |
<jupyter_start><jupyter_text>Traduction (PyTorch) Installez les bibliothèques 🤗 *Datasets* et 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
!pip install accelerate
# Pour exécuter l'entraînement sur TPU, vous devez décommenter la ligne suivante :
# !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl
!apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]"
!git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login
notebook_login()
from datasets import load_dataset, load_metric
raw_datasets = load_dataset("kde4", lang1="en", lang2="fr")
raw_datasets
split_datasets = raw_datasets["train"].train_test_split(train_size=0.9, seed=20)
split_datasets
split_datasets["validation"] = split_datasets.pop("test")
split_datasets["train"][1]["translation"]
from transformers import pipeline
model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
translator = pipeline("translation", model=model_checkpoint)
translator("Default to expanded threads")
split_datasets["train"][172]["translation"]
translator(
"Unable to import %1 using the OFX importer plugin. This file is not the correct format."
)
from transformers import AutoTokenizer
model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="pt")
en_sentence = split_datasets["train"][1]["translation"]["en"]
fr_sentence = split_datasets["train"][1]["translation"]["fr"]
inputs = tokenizer(en_sentence)
with tokenizer.as_target_tokenizer():
targets = tokenizer(fr_sentence)
wrong_targets = tokenizer(fr_sentence)
print(tokenizer.convert_ids_to_tokens(wrong_targets["input_ids"]))
print(tokenizer.convert_ids_to_tokens(targets["input_ids"]))
max_input_length = 128
max_target_length = 128
def preprocess_function(examples):
inputs = [ex["en"] for ex in examples["translation"]]
targets = [ex["fr"] for ex in examples["translation"]]
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
# Configurer le tokenizer pour les cibles
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
tokenized_datasets = split_datasets.map(
preprocess_function,
batched=True,
remove_columns=split_datasets["train"].column_names,
)
from transformers import AutoModelForSeq2SeqLM
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
from transformers import DataCollatorForSeq2Seq
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
batch = data_collator([tokenized_datasets["train"][i] for i in range(1, 3)])
batch.keys()
batch["labels"]
batch["decoder_input_ids"]
for i in range(1, 3):
print(tokenized_datasets["train"][i]["labels"])
!pip install sacrebleu
from datasets import load_metric
metric = load_metric("sacrebleu")
predictions = [
"This plugin lets you translate web pages between several languages automatically."
]
references = [
[
"This plugin allows you to automatically translate web pages between several languages."
]
]
metric.compute(predictions=predictions, references=references)
predictions = ["This This This This"]
references = [
[
"This plugin allows you to automatically translate web pages between several languages."
]
]
metric.compute(predictions=predictions, references=references)
predictions = ["This plugin"]
references = [
[
"This plugin allows you to automatically translate web pages between several languages."
]
]
metric.compute(predictions=predictions, references=references)
import numpy as np
def compute_metrics(eval_preds):
preds, labels = eval_preds
# Dans le cas où le modèle retourne plus que les logits de prédiction
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# Remplacer les -100 dans les étiquettes car nous ne pouvons pas les décoder
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Quelques post-traitements simples
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [[label.strip()] for label in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
return {"bleu": result["score"]}
from huggingface_hub import notebook_login
notebook_login()
from transformers import Seq2SeqTrainingArguments
args = Seq2SeqTrainingArguments(
f"marian-finetuned-kde4-en-to-fr",
evaluation_strategy="no",
save_strategy="epoch",
learning_rate=2e-5,
per_device_train_batch_size=32,
per_device_eval_batch_size=64,
weight_decay=0.01,
save_total_limit=3,
num_train_epochs=3,
predict_with_generate=True,
fp16=True,
push_to_hub=True,
)
from transformers import Seq2SeqTrainer
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
trainer.evaluate(max_length=max_target_length)
trainer.train()
trainer.evaluate(max_length=max_target_length)
trainer.push_to_hub(tags="translation", commit_message="Training complete")
from torch.utils.data import DataLoader
tokenized_datasets.set_format("torch")
train_dataloader = DataLoader(
tokenized_datasets["train"],
shuffle=True,
collate_fn=data_collator,
batch_size=8,
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], collate_fn=data_collator, batch_size=8
)
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
from transformers import AdamW
optimizer = AdamW(model.parameters(), lr=2e-5)
from accelerate import Accelerator
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
from transformers import get_scheduler
num_train_epochs = 3
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps,
)
from huggingface_hub import Repository, get_full_repo_name
model_name = "marian-finetuned-kde4-en-to-fr-accelerate"
repo_name = get_full_repo_name(model_name)
repo_name
output_dir = "marian-finetuned-kde4-en-to-fr-accelerate"
repo = Repository(output_dir, clone_from=repo_name)
def postprocess(predictions, labels):
predictions = predictions.cpu().numpy()
labels = labels.cpu().numpy()
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
# Remplacez -100 dans les étiquettes car nous ne pouvons pas les décoder
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Quelques post-traitements simples
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [[label.strip()] for label in decoded_labels]
return decoded_preds, decoded_labels
from tqdm.auto import tqdm
import torch
progress_bar = tqdm(range(num_training_steps))
for epoch in range(num_train_epochs):
# Entraînement
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
# Evaluation
model.eval()
for batch in tqdm(eval_dataloader):
with torch.no_grad():
generated_tokens = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
max_length=128,
)
labels = batch["labels"]
# Nécessaire pour rembourrer les prédictions et les étiquettes à rassembler
generated_tokens = accelerator.pad_across_processes(
generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
)
labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)
predictions_gathered = accelerator.gather(generated_tokens)
labels_gathered = accelerator.gather(labels)
decoded_preds, decoded_labels = postprocess(predictions_gathered, labels_gathered)
metric.add_batch(predictions=decoded_preds, references=decoded_labels)
results = metric.compute()
print(f"epoch {epoch}, BLEU score: {results['score']:.2f}")
# Sauvegarder et télécharger
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False
)
from transformers import pipeline
# Remplacer par votre propre checkpoint
model_checkpoint = "huggingface-course/marian-finetuned-kde4-en-to-fr"
translator = pipeline("translation", model=model_checkpoint)
translator("Default to expanded threads")
translator(
"Unable to import %1 using the OFX importer plugin. This file is not the correct format."
)<jupyter_output><empty_output> | notebooks/course/fr/chapter7/section4_pt.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter7/section4_pt.ipynb",
"repo_id": "notebooks",
"token_count": 3791
} | 145 |
<jupyter_start><jupyter_text>Partager ses démos avec d'autres Installez les bibliothèques 🤗 Transformers et 🤗 Gradio pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
!pip install gradio
import gradio as gr
title = "Poser une question (en anglais) à Rick"
description = """
Le bot a été entraîné à répondre à des questions basées sur les dialogues de Rick et Morty (en anglais). Demandez à Rick ce que vous voulez !
<img src="https://huggingface.co/spaces/course-demos/Rick_and_Morty_QA/resolve/main/rick.png" width=200px>
"""
article = "Consultez [le bot original Rick et Morty](https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot) sur lequel cette démo est basée."
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
tokenizer = AutoTokenizer.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
model = AutoModelForCausalLM.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
def predict(input, history=[]):
# tokenizer la nouvelle phrase d'entrée
new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
# ajouter les nouveaux tokens d'entrée de l'utilisateur à l'historique de chat
bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
# générer une réponse
history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
# convertit les tokens en texte, puis divise les réponses dans le bon format.
response = tokenizer.decode(history[0]).split("<|endoftext|>")
response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convertir en tuples de liste
return response, history
gr.Interface(
fn=predict,
inputs="textbox",
outputs="text",
title=title,
description=description,
article=article,
examples=[["What are you doing?"], ["Where should we time travel to?"]],
).launch()
# Vous devez récupérer le fichier pytorch_model.bin ici https://huggingface.co/spaces/course-demos/Sketch-Recognition/blob/main/pytorch_model.bin
import torch
import gradio as gr
from torch import nn
import requests
from google.colab import drive
drive.mount('/content/MyDrive')  # `drive.mount` attend un point de montage (un dossier), pas un chemin de fichier
LABELS = requests.get("https://huggingface.co/spaces/course-demos/Sketch-Recognition/raw/main/class_names.txt").text.replace("\n","").split("\r")
model = nn.Sequential(
nn.Conv2d(1, 32, 3, padding="same"),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(32, 64, 3, padding="same"),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 3, padding="same"),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(),
nn.Linear(1152, 256),
nn.ReLU(),
nn.Linear(256, len(LABELS)),
)
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
model.load_state_dict(state_dict, strict=False)
model.eval()
def predict(im):
x = torch.tensor(im, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.0
with torch.no_grad():
out = model(x)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
values, indices = torch.topk(probabilities, 5)
return {LABELS[i]: v.item() for i, v in zip(indices, values)}
interface = gr.Interface(
predict,
inputs="sketchpad",
outputs="label",
theme="huggingface",
title="Reconnaissance de croquis",
description="Qui veut jouer au Pictionary ? Dessinez un objet courant comme une pelle ou un ordinateur portable, et l'algorithme le devinera en temps réel !",
article="<p style='text-align: center'>Reconnaissance de croquis | Modèle de démonstration</p>",
live=True,
)
interface.launch(share=True)<jupyter_output><empty_output> | notebooks/course/fr/chapter9/section4.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter9/section4.ipynb",
"repo_id": "notebooks",
"token_count": 1441
} | 146 |
<jupyter_start><jupyter_text>IntroductionThis notebook is designed to run inference on the [Diffuser](https://arxiv.org/abs/2205.09991) planning model for model-based RL. The notebook is modified from the authors' [original](https://colab.research.google.com/drive/1YajKhu-CUIGBJeQPehjVPJcK_b38a8Nc?usp=sharing#scrollTo=57hSzI4mCgat). For those new to reinforcement learning, consider checking out the HuggingFace [Reinforcement Learning Course](https://huggingface.co/blog/deep-rl-intro) for a primer.> Colab made by [Nathan Lambert](https://natolambert.com) and [Ben Glickenhaus](https://www.linkedin.com/in/benjamin-glickenhaus-859532a3). Installing Packages `apt-get install` requirements These requirements primarily pertain to installing Mujoco and running it in the Colab.Source was inspired by this (fairly recent) [demo](https://colab.research.google.com/drive/1KGMZdRq6AemfcNscKjgpRzXqfhUtCf-V?usp=sharing).<jupyter_code># installations primarily needed for Mujoco
!apt-get install -y \
libgl1-mesa-dev \
libgl1-mesa-glx \
libglew-dev \
libosmesa6-dev \
software-properties-common
!apt-get install -y patchelf<jupyter_output>Reading package lists... Done
Building dependency tree
Reading state information... Done
libglew-dev is already the newest version (2.0.0-5).
libgl1-mesa-dev is already the newest version (20.0.8-0ubuntu1~18.04.1).
libgl1-mesa-glx is already the newest version (20.0.8-0ubuntu1~18.04.1).
libosmesa6-dev is already the newest version (20.0.8-0ubuntu1~18.04.1).
software-properties-common is already the newest version (0.96.24.32.18).
The following package was automatically installed and is no longer required:
libnvidia-common-460
Use 'apt autoremove' to remove it.
0 upgraded, 0 newly installed, 0 to remove and 27 not upgraded.
Reading package lists... Done
Building dependency tree
Reading state information... Done
patchelf is already the newest version (0.9-1).
The following package was automatically installed and is no longer required:
libnvidia-common-460
Use 'apt autoremove' to remove it.
0 upgraded, 0 newly installed, 0 to remove and 27 not upgraded.<jupyter_text>Install Diffusers<jupyter_code>%cd /content
# install latest HF diffusers
!rm -rf /content/diffusers/
!git clone -b rl https://github.com/huggingface/diffusers.git
!pip install -q /content/diffusers
!pip install -q datasets transformers<jupyter_output>/content
Found existing installation: diffusers 0.5.0.dev0
Uninstalling diffusers-0.5.0.dev0:
Successfully uninstalled diffusers-0.5.0.dev0
Cloning into 'diffusers'...
remote: Enumerating objects: 10356, done.[K
remote: Counting objects: 100% (502/502), done.[K
remote: Compressing objects: 100% (251/251), done.[K
remote: Total 10356 (delta 277), reused 384 (delta 201), pack-reused 9854[K
Receiving objects: 100% (10356/10356), 7.81 MiB | 17.77 MiB/s, done.
Resolving deltas: 100% (6885/6885), done.
[33m DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.
pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.[0m
Installing build dependencies ... [?25l[?25hdone
Getting requirements to build wh[...]<jupyter_text>`pip install` requirements<jupyter_code># primarily RL-sepcific requirements
%pip install -f https://download.pytorch.org/whl/torch_stable.html \
free-mujoco-py \
einops \
gym==0.24.1 \
protobuf==3.20.1 \
git+https://github.com/rail-berkeley/d4rl.git \
mediapy \
Pillow==9.0.0<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/
Looking in links: https://download.pytorch.org/whl/torch_stable.html
Collecting git+https://github.com/rail-berkeley/d4rl.git
Cloning https://github.com/rail-berkeley/d4rl.git to /tmp/pip-req-build-7j2y8u6t
Running command git clone -q https://github.com/rail-berkeley/d4rl.git /tmp/pip-req-build-7j2y8u6t
Requirement already satisfied: free-mujoco-py in /usr/local/lib/python3.7/dist-packages (2.1.6)
Requirement already satisfied: einops in /usr/local/lib/python3.7/dist-packages (0.5.0)
Requirement already satisfied: gym in /usr/local/lib/python3.7/dist-packages (0.24.1)
Requirement already satisfied: protobuf==3.20.1 in /usr/local/lib/python3.7/dist-packages (3.20.1)
Requirement already satisfied: mediapy in /usr/local/lib/python3.7/dist-packages (1.1.2)
Requirement already satisfied: Pillow==9.0.0 in /usr/local/lib/python3.7/dist-packages (9.0.0)
Collecting mjrl@ git+https://github.co[...]<jupyter_text>Import D4RL to initialize Mujoco[Mujoco](https://github.com/deepmind/mujoco) is a physics simulator used extensively in reinforcement learning research. Here, we import [D4RL](https://github.com/rail-berkeley/d4rl) (a library of datasets and environments for Offline RL), which results in the building of Mujoco.<jupyter_code>## cythonize mujoco-py at first import
import d4rl<jupyter_output>Warning: Gym version v0.24.1 has a number of critical issues with `gym.make` such that environment observation and action spaces are incorrectly evaluated, raising incorrect errors and warning . It is recommend to downgrading to v0.23.1 or upgrading to v0.25.1
Warning: Flow failed to import. Set the environment variable D4RL_SUPPRESS_IMPORT_ERROR=1 to suppress this message.
No module named 'flow'
Warning: CARLA failed to import. Set the environment variable D4RL_SUPPRESS_IMPORT_ERROR=1 to suppress this message.
No module named 'carla'
/usr/local/lib/python3.7/dist-packages/gym/envs/registration.py:416: UserWarning: [33mWARN: The `registry.env_specs` property along with `EnvSpecTree` is deprecated. Please use `registry` directly as a dictionary instead.[0m
"The `registry.env_specs` property along with `EnvSpecTree` is deprecated. Please use `registry` directly as a dictionary instead."<jupyter_text>--- Environment & Model SetupIn this section, we will create the environment, handle the data, and run the diffusion model. Imports<jupyter_code>import torch
import tqdm
import numpy as np
import gym<jupyter_output><empty_output><jupyter_text>Create environmentThis colab is designed to run with pretrained models from the hopper environment. As more models are trained, this can be extended.<jupyter_code>env_name = "hopper-medium-v2"
env = gym.make(env_name)
data = env.get_dataset() # dataset is only used for normalization in this colab<jupyter_output>/usr/local/lib/python3.7/dist-packages/gym/envs/mujoco/mujoco_env.py:47: UserWarning: [33mWARN: This version of the mujoco environments depends on the mujoco-py bindings, which are no longer maintained and may stop working. Please upgrade to the v4 versions of the environments (which depend on the mujoco python bindings instead), unless you are trying to precisely replicate previous works).[0m
"This version of the mujoco environments depends "
/usr/local/lib/python3.7/dist-packages/gym/spaces/box.py:112: UserWarning: [33mWARN: Box bound precision lowered by casting to float32[0m
logger.warn(f"Box bound precision lowered by casting to {self.dtype}")
/usr/local/lib/python3.7/dist-packages/gym/utils/passive_env_checker.py:70: UserWarning: [33mWARN: Agent's minimum action space value is -infinity. This is probably too low.[0m
"Agent's minimum action space value is -infinity. This is probably too low."
/usr/local/lib/python3.7/dist-packages/gym/utils/passive_env_checker.py:74: U[...]<jupyter_text>Define constants<jupyter_code># Cuda settings for colab
torch.cuda.get_device_name(0)
DEVICE = 'cuda:0'
DTYPE = torch.float
# diffusion model settings
n_samples = 4 # number of trajectories planned via diffusion
horizon = 128 # length of sampled trajectories
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
num_inference_steps = 20 # number of diffusion steps<jupyter_output><empty_output><jupyter_text>Helper functions* `normalize` scales the state values corresponding to the training data-set in D4RL,* `de_normalize` unscales the data for correct rendering,* `to_torch` handles casting to torch for both numpy arrays and dicts (used for conditioning the model, see `reset_x0`).<jupyter_code>def normalize(x_in, data, key):
means = data[key].mean(axis=0)
stds = data[key].std(axis=0)
return (x_in - means) / stds
def de_normalize(x_in, data, key):
means = data[key].mean(axis=0)
stds = data[key].std(axis=0)
return x_in * stds + means
def to_torch(x_in, dtype=None, device=None):
dtype = dtype or DTYPE
device = device or DEVICE
if type(x_in) is dict:
return {k: to_torch(v, dtype, device) for k, v in x_in.items()}
elif torch.is_tensor(x_in):
return x_in.to(device).type(dtype)
return torch.tensor(x_in, dtype=dtype, device=device)<jupyter_output><empty_output><jupyter_text>Sample env. initial state<jupyter_code>## Can set environment seed for debugging
# torch.manual_seed(0)
# np.random.seed(0)
# env.seed(1996)
obs = env.reset()
obs_raw = obs
# normalize observations for forward passes
obs = normalize(obs, data, 'observations')<jupyter_output><empty_output><jupyter_text>Run the Diffusion Process -- from Scratch Initialize modelIn this section, we create a scheduler and load a pretrained model from the Hub. An important detail in the RL application space is to save `conditions` which will allow the model to optimize trajectories only from the current state (which is crucial to making decisions!).<jupyter_code>from diffusers import DDPMScheduler, UNet1DModel
# Two generators for different parts of the diffusion loop to work in colab
generator = torch.Generator(device='cuda')
generator_cpu = torch.Generator(device='cpu')
scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2")
# The horizon represents the length of trajectories used in training.
network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet").to(device=DEVICE)<jupyter_output><empty_output><jupyter_text>Planning helper function`reset_x0` is used to constrain the diffusion process to trajectories starting at the current state of the agent. Without this, the diffusion process would generate arbitrary high-reward trajectories, rather than trajectories beginning at the current state.<jupyter_code>def reset_x0(x_in, cond, act_dim):
for key, val in cond.items():
x_in[:, key, act_dim:] = val.clone()
    return x_in<jupyter_output><empty_output><jupyter_text>Setup for denoising`conditions` is the variable used to pin the first state of the planned trajectories to the current state (it is passed into `reset_x0`).<jupyter_code>## add a batch dimension and repeat for multiple samples
## [ observation_dim ] --> [ n_samples x observation_dim ]
obs = obs[None].repeat(n_samples, axis=0)
conditions = {
0: to_torch(obs, device=DEVICE)
}
# constants for inference
batch_size = len(conditions[0])
shape = (batch_size, horizon, state_dim+action_dim)<jupyter_output><empty_output><jupyter_text>Sample initial noise<jupyter_code># sample random initial noise vector
x1 = torch.randn(shape, device=DEVICE, generator=generator)
# this model is conditioned from an initial state, so you will see this function
# multiple times to change the initial state of generated data to the state
# generated via env.reset() above or env.step() below
x = reset_x0(x1, conditions, action_dim)
# convert a np observation to torch for model forward pass
x = to_torch(x)<jupyter_output><empty_output><jupyter_text>Generate trajectoriesThe diffusion process for trajectories has 4 central components:1. sampling a predicted original sample from the model (note that this model directly predicts the sample, rather than the error term `epsilon` used in many diffusion models),2. using the scheduler to predict the sample at the previous timestep,3. [optional] adding posterior noise to the sample,4. conditioning the trajectory to constrain the initial state.<jupyter_code>eta = 1.0 # noise factor for sampling reconstructed state
# run the diffusion process
# for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
for i in tqdm.tqdm(scheduler.timesteps):
# create batch of timesteps to pass into model
timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long)
# 1. generate prediction from model
with torch.no_grad():
residual = network(x.permute(0, 2, 1), timesteps).sample
residual = residual.permute(0, 2, 1) # needed to match model params to original
# 2. use the model prediction to reconstruct an observation (de-noise)
obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=False)["prev_sample"]
# 3. [optional] add posterior noise to the sample
if eta > 0:
noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device)
posterior_variance = scheduler._get_variance(i) # * noise
# no noise when t == 0
# NOTE: original implementation missing sqrt on posterior_variance
obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated
# 4. apply conditions to the trajectory
obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim)
x = to_torch(obs_reconstruct_postcond)
x.shape<jupyter_output><empty_output><jupyter_text>--- Render the samples Rendering ToolsRendering from Mujoco is historically not easy. Here is a modified version from the original paper. Additionally, a TODO is to investigate this web-based [viewer](https://github.com/kevinzakka/mjc_viewer). Video helpers<jupyter_code>import os
import mediapy as media
def to_np(x_in):
if torch.is_tensor(x_in):
x_in = x_in.detach().cpu().numpy()
return x_in
# from MJ's Diffuser code
# https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79
def mkdir(savepath):
"""
returns `True` iff `savepath` is created
"""
if not os.path.exists(savepath):
os.makedirs(savepath)
return True
else:
return False
def show_sample(renderer, observations, filename='sample.mp4', savebase='/content/videos'):
'''
observations : [ batch_size x horizon x observation_dim ]
'''
mkdir(savebase)
savepath = os.path.join(savebase, filename)
images = []
for rollout in observations:
## [ horizon x height x width x channels ]
img = renderer._renders(rollout, partial=True)
images.append(img)
## [ horizon x height x (batch_size * width) x channels ]
images = np.concatenate(images, axis=2)
media.show_video(images, codec='h264', fps=60)<jupyter_output><empty_output><jupyter_text>Renderer helpersThese functions involve setting the state of the environment and reading it out in a pixel form.<jupyter_code># Code adapted from Michael Janner
# source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py
import mujoco_py as mjc
def env_map(env_name):
'''
map D4RL dataset names to custom fully-observed
variants for rendering
'''
if 'halfcheetah' in env_name:
return 'HalfCheetahFullObs-v2'
elif 'hopper' in env_name:
return 'HopperFullObs-v2'
elif 'walker2d' in env_name:
return 'Walker2dFullObs-v2'
else:
return env_name
def get_image_mask(img):
background = (img == 255).all(axis=-1, keepdims=True)
mask = ~background.repeat(3, axis=-1)
return mask
def atmost_2d(x):
while x.ndim > 2:
x = x.squeeze(0)
return x
def set_state(env, state):
qpos_dim = env.sim.data.qpos.size
qvel_dim = env.sim.data.qvel.size
if not state.size == qpos_dim + qvel_dim:
warnings.warn(
f'[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, '
f'but got state of size {state.size}')
state = state[:qpos_dim + qvel_dim]
env.set_state(state[:qpos_dim], state[qpos_dim:])<jupyter_output><empty_output><jupyter_text>Rendering classUse the previously defined helpers to programmatically render pixel sequences from a trajectory of states. This class takes the re-scaled outputs of the diffusion process and visualizes them.<jupyter_code>class MuJoCoRenderer:
'''
default mujoco renderer
'''
def __init__(self, env):
if type(env) is str:
env = env_map(env)
self.env = gym.make(env)
else:
self.env = env
## - 1 because the envs in renderer are fully-observed
## @TODO : clean up
self.observation_dim = np.prod(self.env.observation_space.shape) - 1
self.action_dim = np.prod(self.env.action_space.shape)
try:
self.viewer = mjc.MjRenderContextOffscreen(self.env.sim)
except:
print('[ utils/rendering ] Warning: could not initialize offscreen renderer')
self.viewer = None
def pad_observation(self, observation):
state = np.concatenate([
np.zeros(1),
observation,
])
return state
def pad_observations(self, observations):
qpos_dim = self.env.sim.data.qpos.size
## xpos is hidden
xvel_dim = qpos_dim - 1
xvel = observations[:, xvel_dim]
xpos = np.cumsum(xvel) * self.env.dt
states = np.concatenate([
xpos[:,None],
observations,
], axis=-1)
return states
def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None):
if type(dim) == int:
dim = (dim, dim)
if self.viewer is None:
return np.zeros((*dim, 3), np.uint8)
if render_kwargs is None:
xpos = observation[0] if not partial else 0
render_kwargs = {
'trackbodyid': 2,
'distance': 3,
'lookat': [xpos, -0.5, 1],
'elevation': -20
}
for key, val in render_kwargs.items():
if key == 'lookat':
self.viewer.cam.lookat[:] = val[:]
else:
setattr(self.viewer.cam, key, val)
if partial:
state = self.pad_observation(observation)
else:
state = observation
qpos_dim = self.env.sim.data.qpos.size
if not qvel or state.shape[-1] == qpos_dim:
qvel_dim = self.env.sim.data.qvel.size
state = np.concatenate([state, np.zeros(qvel_dim)])
set_state(self.env, state)
self.viewer.render(*dim)
data = self.viewer.read_pixels(*dim, depth=False)
data = data[::-1, :, :]
return data
def _renders(self, observations, **kwargs):
images = []
for observation in observations:
img = self.render(observation, **kwargs)
images.append(img)
return np.stack(images, axis=0)
def renders(self, samples, partial=False, **kwargs):
if partial:
samples = self.pad_observations(samples)
partial = False
sample_images = self._renders(samples, partial=partial, **kwargs)
composite = np.ones_like(sample_images[0]) * 255
for img in sample_images:
mask = get_image_mask(img)
composite[mask] = img[mask]
return composite
def __call__(self, *args, **kwargs):
        return self.renders(*args, **kwargs)<jupyter_output><empty_output><jupyter_text>Show PlansThis section renders 4 trajectories chosen from the same initial state in the environment. Initialize renderer class for the environment<jupyter_code>render = MuJoCoRenderer(env)<jupyter_output><empty_output><jupyter_text>Show the videoShow the states generated by the diffusion model in the real environment. Note that the actions are dropped from the data.<jupyter_code>de_normalized = de_normalize(to_np(x[:,:,action_dim:]), data, 'observations')
show_sample(render, de_normalized)<jupyter_output><empty_output><jupyter_text>Run Value Guided Diffusion -- with PipelineIn this section, we repeat the above code, but we use a pre-trained pipeline in Diffusers!<jupyter_code>from diffusers import ValueGuidedRLPipeline
env_name = "hopper-medium-v2"
env = gym.make(env_name)
data = env.get_dataset() # dataset is only used for normalization in this colab
render = MuJoCoRenderer(env)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
DEVICE = "cuda"<jupyter_output><empty_output><jupyter_text>Load the pipeline!<jupyter_code>pipeline = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
obs = env.reset()
total_reward = 0
total_score = 0
T = 100
rollout = [obs.copy()]
trajectories = []
y_maxes = [0]
for t in tqdm.tqdm(range(T)):
# normalize observations for forward passes
denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
next_observation, reward, terminal, _ = env.step(denorm_actions)
score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
f" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
obs = next_observation
show_sample(render, np.expand_dims(np.stack(rollout), axis=0))<jupyter_output><empty_output> | notebooks/diffusers/reinforcement_learning_with_diffusers.ipynb/0 | {
"file_path": "notebooks/diffusers/reinforcement_learning_with_diffusers.ipynb",
"repo_id": "notebooks",
"token_count": 8060
} | 147 |
<jupyter_start><jupyter_text>IDEFICS: A Flamingo-based model, trained at scale for the community Finetuning Demo Notebook: Credit: [Flamingo blog](https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model)This Google Colab notebook shows how to run predictions with the 4-bit quantized 🤗 [Idefics-9B model](https://huggingface.co/HuggingFaceM4/idefics-9b) and finetune it on a specific dataset.[IDEFICS](https://huggingface.co/HuggingFaceM4/idefics-80b) is a multi-modal model based on the [Flamingo](https://arxiv.org/abs/2204.14198) architecture. It can take images and text as input and return text outputs, but it does not support image generation. \\IDEFICS is built on top of two unimodal open-access pre-trained models to connect the two modalities. Newly initialized parameters in the form of Transformer blocks bridge the gap between the vision encoder and the language model. The model is trained on a mixture of image/text pairs and unstructured multimodal web documents. \\The [finetuned versions](https://huggingface.co/HuggingFaceM4/idefics-80b-instruct) of IDEFICS behave like LLM chatbots while also understanding visual input. \\You can play with the [demo here](https://huggingface.co/spaces/HuggingFaceM4/idefics_playground).The code for this notebook was contributed to by *Léo Tronchon, Younes Belkada, and Stas Bekman*; the IDEFICS model has been contributed to by: *Lucile Saulnier, Léo Tronchon, Hugo Laurençon, Stas Bekman, Amanpreet Singh, Siddharth Karamcheti, and Victor Sanh* Install and import necessary libraries<jupyter_code>!pip install -q datasets
!pip install -q git+https://github.com/huggingface/transformers.git@add-model-idefics
!pip install -q bitsandbytes sentencepiece accelerate loralib
!pip install -q -U git+https://github.com/huggingface/peft.git
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from PIL import Image
from transformers import IdeficsForVisionText2Text, AutoProcessor, Trainer, TrainingArguments, BitsAndBytesConfig
import torchvision.transforms as transforms<jupyter_output><empty_output><jupyter_text>Load quantized modelFirst get the quantized version of the model. This will allow us to use the 9B version of Idefics with a single 16GB gpu<jupyter_code>device = "cuda" if torch.cuda.is_available() else "cpu"
# checkpoint = "HuggingFaceM4/tiny-random-idefics"
checkpoint = "HuggingFaceM4/idefics-9b"
# Here we skip some special modules that can't be quantized properly
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
llm_int8_skip_modules=["lm_head", "embed_tokens"],
)
processor = AutoProcessor.from_pretrained(checkpoint, use_auth_token=True)
# Simply take-off the quantization_config arg if you want to load the original model
model = IdeficsForVisionText2Text.from_pretrained(checkpoint, quantization_config=bnb_config, device_map="auto")<jupyter_output>/usr/local/lib/python3.10/dist-packages/transformers/models/auto/processing_auto.py:203: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.
warnings.warn(<jupyter_text>If you print the model, you will see that all `nn.Linear` layers are in fact replaced by `bnb.nn.Linear4bit` layers.<jupyter_code>print(model)<jupyter_output>IdeficsForVisionText2Text(
(model): IdeficsModel(
(embed_tokens): IdeficsDecoupledEmbedding(
num_embeddings=32000, num_additional_embeddings=2, embedding_dim=4096, partially_freeze=False
(additional_embedding): Embedding(2, 4096)
)
(vision_model): IdeficsVisionTransformer(
(embeddings): IdeficsVisionEmbeddings(
(patch_embedding): Conv2d(3, 1280, kernel_size=(14, 14), stride=(14, 14), bias=False)
(position_embedding): Embedding(257, 1280)
)
(pre_layrnorm): LayerNorm((1280,), eps=1e-05, elementwise_affine=True)
(encoder): IdeficsVisionEncoder(
(layers): ModuleList(
(0-31): 32 x IdeficsVisionEncoderLayer(
(self_attn): IdeficsVisionAttention(
(k_proj): Linear4bit(in_features=1280, out_features=1280, bias=True)
(v_proj): Linear4bit(in_features=1280, out_features=1280, bias=True)
(q_proj): Linear4bit(in_features=1280, out_features=1280, bias=True)
[...]<jupyter_text>InferenceLet's make a simple method to test the model's inference<jupyter_code>def check_inference(model, processor, prompts, max_new_tokens=50):
tokenizer = processor.tokenizer
bad_words = ["<image>", "<fake_token_around_image>"]
if len(bad_words) > 0:
bad_words_ids = tokenizer(bad_words, add_special_tokens=False).input_ids
eos_token = "</s>"
eos_token_id = tokenizer.convert_tokens_to_ids(eos_token)
inputs = processor(prompts, return_tensors="pt").to(device)
generated_ids = model.generate(**inputs, eos_token_id=[eos_token_id], bad_words_ids=bad_words_ids, max_new_tokens=max_new_tokens, early_stopping=True)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_text)<jupyter_output><empty_output><jupyter_text>Let's run prediction with the quantized model for the image below which pictures two kittens. \\<jupyter_code>url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
prompts = [
# "Instruction: provide an answer to the question. Use the image to answer.\n",
url,
"Question: What's on the picture? Answer:",
]
check_inference(model, processor, prompts, max_new_tokens=5)<jupyter_output>Question: What's on the picture? Answer: Two kittens.<jupyter_text>Now let's see how the model fares on pokemon knowledge before we try to finetune it further. \\<jupyter_code># check generation before finetuning
url = "https://images.pokemontcg.io/pop6/2_hires.png"
prompts = [
url,
"Question: What's on the picture? Answer:",
]
check_inference(model, processor, prompts, max_new_tokens=100)
# It looks like the model is already aware of pokemon - but it could be more specific, and less repetitive<jupyter_output>Question: What's on the picture? Answer: Lucario
Lucario is a Pokémon that is a combination of a bear and a lion. It is a Pokémon that is a combination of a bear and a lion. It is a Pokémon that is a combination of a bear and a lion. It is a Pokémon that is a combination of a bear and a lion. It is a Pokémon that is a combination of a bear and a lion. It is a Pok<jupyter_text>Finetuning datasetPrepare the dataset that will be used for finetuning<jupyter_code>def convert_to_rgb(image):
# `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
# for transparent images. The call to `alpha_composite` handles this case
if image.mode == "RGB":
return image
image_rgba = image.convert("RGBA")
background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
alpha_composite = Image.alpha_composite(background, image_rgba)
alpha_composite = alpha_composite.convert("RGB")
return alpha_composite
def ds_transforms(example_batch):
image_size = processor.image_processor.image_size
image_mean = processor.image_processor.image_mean
image_std = processor.image_processor.image_std
image_transform = transforms.Compose([
convert_to_rgb,
transforms.RandomResizedCrop((image_size, image_size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize(mean=image_mean, std=image_std),
])
prompts = []
for i in range(len(example_batch['caption'])):
# We split the captions to avoid having very long examples, which would require more GPU ram during training
caption = example_batch['caption'][i].split(".")[0]
prompts.append(
[
example_batch['image_url'][i],
f"Question: What's on the picture? Answer: This is {example_batch['name'][i]}. {caption}</s>",
],
)
inputs = processor(prompts, transform=image_transform, return_tensors="pt").to(device)
inputs["labels"] = inputs["input_ids"]
return inputs
# load and prepare dataset
ds = load_dataset("TheFusion21/PokemonCards")
ds = ds["train"].train_test_split(test_size=0.002)
train_ds = ds["train"]
eval_ds = ds["test"]
train_ds.set_transform(ds_transforms)
eval_ds.set_transform(ds_transforms)<jupyter_output><empty_output><jupyter_text>LoRAAfter specifying the low-rank adapters (LoRA) config, we load the PeftModel using the get_peft_model utility function<jupyter_code>model_name = checkpoint.split("/")[1]
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "k_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
)
model = get_peft_model(model, config)
model.print_trainable_parameters()<jupyter_output>trainable params: 19,750,912 || all params: 8,949,430,544 || trainable%: 0.2206946230030432<jupyter_text>TrainingFinally, using the Hugging Face Trainer, we can finetune the model!For the sake of the demo, we have set the max_steps at 40. That's about 0.05 epoch on this dataset, so feel free to tune further!It has been reported that fine-tuning in mixed precision fp16 can lead to overflows. As such, we recommend training in mixed precision bf16 when possible.<jupyter_code>training_args = TrainingArguments(
output_dir=f"{model_name}-pokemon",
learning_rate=2e-4,
fp16=True,
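    # Note: bf16=True is preferred over fp16=True when the hardware supports it (see the overflow note above)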
per_device_train_batch_size=2,
per_device_eval_batch_size=2,
gradient_accumulation_steps=8,
dataloader_pin_memory=False,
save_total_limit=3,
evaluation_strategy="steps",
save_strategy="steps",
save_steps=40,
eval_steps=20,
logging_steps=20,
max_steps=40,
remove_unused_columns=False,
push_to_hub=False,
label_names=["labels"],
load_best_model_at_end=True,
report_to=None,
optim="paged_adamw_8bit",
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_ds,
eval_dataset=eval_ds,
)
trainer.train()
# check generation again after finetuning
check_inference(model, processor, prompts, max_new_tokens=100)<jupyter_output>Question: What's on the picture? Answer: This is Lucario. A Stage 2 Pokemon Card of type Fighting with the title Lucario and 90 HP of rarity Rare evolved from Pikachu from the set Neo Destiny and the flavor text: It can use its tail as a whip<jupyter_text>Push your new model to the hub!<jupyter_code># Insert your "write" token. You should find it in the settings of your HF profile
!huggingface-cli login
model.push_to_hub(f"{model_name}-pokemon", private=False)<jupyter_output><empty_output> | notebooks/examples/idefics/finetune_image_captioning_peft.ipynb/0 | {
"file_path": "notebooks/examples/idefics/finetune_image_captioning_peft.ipynb",
"repo_id": "notebooks",
"token_count": 3875
} | 148 |
<jupyter_start><jupyter_text>How to export 🤗 Transformers Models to ONNX? [ONNX](http://onnx.ai/) is an open format for machine learning models. It allows you to save your neural network's computation graph in a framework-agnostic way, which might be particularly helpful when deploying deep learning models.Indeed, businesses might have other requirements _(languages, hardware, ...)_ for which the training framework might not be the best suited in inference scenarios. In that context, having a representation of the actual computation graph that can be shared across various business units and logic across an organization might be a desirable component.Along with the serialization format, ONNX also provides a runtime library which allows efficient and hardware-specific execution of the ONNX graph. This is done through the [onnxruntime](https://microsoft.github.io/onnxruntime/) project and already includes collaborations with many hardware vendors to seamlessly deploy models on various platforms.Through this notebook we'll walk you through the process to convert a PyTorch or TensorFlow transformers model to the [ONNX](http://onnx.ai/) format and leverage [onnxruntime](https://microsoft.github.io/onnxruntime/) to run inference tasks on models from 🤗 __transformers__ Exporting 🤗 transformers model to ONNX---Exporting models _(either PyTorch or TensorFlow)_ is easily achieved through the conversion tool provided as part of the 🤗 __transformers__ repository. Under the hood, the process is essentially the following: 1. Allocate the model from transformers (**PyTorch or TensorFlow**)2. Forward dummy inputs through the model; this way **ONNX** can record the set of operations executed3. Optionally define dynamic axes on input and output tensors4. Save the graph along with the network parameters<jupyter_code>import sys
!{sys.executable} -m pip install --upgrade git+https://github.com/huggingface/transformers
!{sys.executable} -m pip install --upgrade torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
!{sys.executable} -m pip install --upgrade onnxruntime==1.4.0
!{sys.executable} -m pip install -i https://test.pypi.org/simple/ ort-nightly
!{sys.executable} -m pip install --upgrade onnxruntime-tools<jupyter_output>Collecting git+https://github.com/huggingface/transformers
Cloning https://github.com/huggingface/transformers to /tmp/pip-req-build-9rvbp9p8
Running command git clone -q https://github.com/huggingface/transformers /tmp/pip-req-build-9rvbp9p8
Requirement already satisfied, skipping upgrade: numpy in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers==3.0.2) (1.18.1)
Requirement already satisfied, skipping upgrade: tokenizers==0.8.1.rc2 in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers==3.0.2) (0.8.1rc2)
Requirement already satisfied, skipping upgrade: packaging in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers==3.0.2) (20.4)
Requirement already satisfied, skipping upgrade: filelock in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers==3.0.2) (3.0.12)
Requirement already satisfied, skipping upgrade: requests in /home/mfuntowicz/[...]<jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry
send_example_telemetry("onnx_export_notebook", framework="pytorch")
!rm -rf onnx/
from pathlib import Path
from transformers.convert_graph_to_onnx import convert
# Handles all the above steps for you
convert(framework="pt", model="bert-base-cased", output=Path("onnx/bert-base-cased.onnx"), opset=11)
# Tensorflow
# convert(framework="tf", model="bert-base-cased", output="onnx/bert-base-cased.onnx", opset=11)<jupyter_output>loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json from cache at /home/mfuntowicz/.cache/torch/transformers/b945b69218e98b3e2c95acf911789741307dec43c698d35fad11c1ae28bda352.9da767be51e1327499df13488672789394e2ca38b877837e52618a67d7002391
Model config BertConfig {
"architectures": [
"BertForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"type_vocab_size": 2,
"vocab_size": 28996
}<jupyter_text>How to leverage runtime for inference over an ONNX graph---As mentioned in the introduction, **ONNX** is a serialization format and many side projects can load the saved graph and run the actual computations from it. Here, we'll focus on the official [onnxruntime](https://microsoft.github.io/onnxruntime/). The runtime is implemented in C++ for performance reasons and provides APIs/bindings for C++, C, C#, Java and Python.In the case of this notebook, we will use the Python API to highlight how to load a serialized **ONNX** graph and run inference workloads on various backends through **onnxruntime**.**onnxruntime** is available on pypi:- onnxruntime: ONNX + MLAS (Microsoft Linear Algebra Subprograms)- onnxruntime-gpu: ONNX + MLAS + CUDA<jupyter_code>!pip install transformers onnxruntime-gpu onnx psutil matplotlib<jupyter_output>Requirement already satisfied: transformers in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (3.0.2)
Requirement already satisfied: onnxruntime-gpu in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (1.3.0)
Requirement already satisfied: onnx in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (1.7.0)
Requirement already satisfied: psutil in /home/mfuntowicz/.local/lib/python3.8/site-packages/psutil-5.7.0-py3.8-linux-x86_64.egg (5.7.0)
Requirement already satisfied: matplotlib in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (3.3.1)
Requirement already satisfied: tqdm>=4.27 in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers) (4.46.1)
Requirement already satisfied: numpy in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers) (1.18.1)
Requirement already satisfied: sacremoses in /home/mfuntowicz/miniconda3/envs/pytorch/lib/pyt[...]<jupyter_text>Preparing for an Inference Session---Inference is done using a specific backend definition which turns on hardware specific optimizations of the graph. Optimizations are basically of three kinds: - **Constant Folding**: Convert static variables to constants in the graph - **Deadcode Elimination**: Remove nodes never accessed in the graph- **Operator Fusing**: Merge multiple instruction into one (Linear -> ReLU can be fused to be LinearReLU)ONNX Runtime automatically applies most optimizations by setting specific `SessionOptions`.Note:Some of the latest optimizations that are not yet integrated into ONNX Runtime are available in [optimization script](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers) that tunes models for the best performance.<jupyter_code># # An optional step unless
# # you want to get a model with mixed precision for perf acceleration on newer GPUs
# # or you are working with Tensorflow(tf.keras) models or pytorch models other than bert
# !pip install onnxruntime-tools
# from onnxruntime_tools import optimizer
# # Mixed precision conversion for bert-base-cased model converted from Pytorch
# optimized_model = optimizer.optimize_model("bert-base-cased.onnx", model_type='bert', num_heads=12, hidden_size=768)
# optimized_model.convert_model_float32_to_float16()
# optimized_model.save_model_to_file("bert-base-cased.onnx")
# # optimizations for bert-base-cased model converted from Tensorflow(tf.keras)
# optimized_model = optimizer.optimize_model("bert-base-cased.onnx", model_type='bert_keras', num_heads=12, hidden_size=768)
# optimized_model.save_model_to_file("bert-base-cased.onnx")
# optimize transformer-based models with onnxruntime-tools
from onnxruntime_tools import optimizer
from onnxruntime_tools.transformers.onnx_model_bert import BertOptimizationOptions
# disable embedding layer norm optimization for better model size reduction
opt_options = BertOptimizationOptions('bert')
opt_options.enable_embed_layer_norm = False
opt_model = optimizer.optimize_model(
'onnx/bert-base-cased.onnx',
'bert',
num_heads=12,
hidden_size=768,
optimization_options=opt_options)
opt_model.save_model_to_file('bert.opt.onnx')
from os import environ
from psutil import cpu_count
# Constants from the performance optimization available in onnxruntime
# It needs to be done before importing onnxruntime
environ["OMP_NUM_THREADS"] = str(cpu_count(logical=True))
environ["OMP_WAIT_POLICY"] = 'ACTIVE'
from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions, get_all_providers
from contextlib import contextmanager
from dataclasses import dataclass
from typing import List
from time import time
from tqdm import trange
def create_model_for_provider(model_path: str, provider: str) -> InferenceSession:
assert provider in get_all_providers(), f"provider {provider} not found, {get_all_providers()}"
# Few properties that might have an impact on performances (provided by MS)
options = SessionOptions()
options.intra_op_num_threads = 1
options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
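    # (Optional, illustrative) the optimized graph can also be persisted for inspection,
    # e.g. with Netron, by setting an output path (kept commented out here):
    # options.optimized_model_filepath = model_path.replace(".onnx", ".optimized.onnx")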
# Load the model as a graph and prepare the CPU backend
session = InferenceSession(model_path, options, providers=[provider])
session.disable_fallback()
return session
@contextmanager
def track_infer_time(buffer: List[int]):
start = time()
yield
end = time()
buffer.append(end - start)
@dataclass
class OnnxInferenceResult:
    model_inference_time: List[int]
    optimized_model_path: str<jupyter_output><empty_output><jupyter_text>Forwarding through our optimized ONNX model running on CPU---When the model is loaded for inference over a specific provider, for instance **CPUExecutionProvider** as above, an optimized graph can be saved. This graph might include various optimizations, and you might be able to see some **higher-level** operations in the graph _(through [Netron](https://github.com/lutzroeder/Netron) for instance)_ such as:- **EmbedLayerNormalization**- **Attention**- **FastGeLU**These operations are an example of the kind of optimization **onnxruntime** is doing, for instance here gathering multiple operations into a bigger one _(Operator Fusing)_.<jupyter_code>from transformers import BertTokenizerFast
tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
cpu_model = create_model_for_provider("onnx/bert-base-cased.onnx", "CPUExecutionProvider")
# Inputs are provided through numpy array
model_inputs = tokenizer("My name is Bert", return_tensors="pt")
inputs_onnx = {k: v.cpu().detach().numpy() for k, v in model_inputs.items()}
# Run the model (None = get all the outputs)
sequence, pooled = cpu_model.run(None, inputs_onnx)
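# (Illustrative addition) the session also exposes the graph's declared outputs by name
print([output.name for output in cpu_model.get_outputs()])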
# Print information about outputs
print(f"Sequence output: {sequence.shape}, Pooled output: {pooled.shape}")<jupyter_output>loading file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt from cache at /home/mfuntowicz/.cache/torch/transformers/5e8a2b4893d13790ed4150ca1906be5f7a03d6c4ddf62296c383f6db42814db2.e13dbb970cb325137104fb2e5f36fe865f27746c6b526f6352861b1980eb80b1<jupyter_text>Benchmarking PyTorch model_Note: PyTorch model benchmark is run on CPU_<jupyter_code>from transformers import BertModel
PROVIDERS = {
("cpu", "PyTorch CPU"),
# Uncomment this line to enable GPU benchmarking
# ("cuda:0", "PyTorch GPU")
}
results = {}
for device, label in PROVIDERS:
# Move inputs to the correct device
model_inputs_on_device = {
arg_name: tensor.to(device)
for arg_name, tensor in model_inputs.items()
}
# Add PyTorch to the providers
model_pt = BertModel.from_pretrained("bert-base-cased").to(device)
for _ in trange(10, desc="Warming up"):
model_pt(**model_inputs_on_device)
# Compute
time_buffer = []
for _ in trange(100, desc=f"Tracking inference time on PyTorch"):
with track_infer_time(time_buffer):
model_pt(**model_inputs_on_device)
# Store the result
results[label] = OnnxInferenceResult(
time_buffer,
None
)<jupyter_output>loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json from cache at /home/mfuntowicz/.cache/torch/transformers/b945b69218e98b3e2c95acf911789741307dec43c698d35fad11c1ae28bda352.9da767be51e1327499df13488672789394e2ca38b877837e52618a67d7002391
Model config BertConfig {
"architectures": [
"BertForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"type_vocab_size": 2,
"vocab_size": 28996
}
loading weights file https://cdn.huggingface.co/bert-base-cased-pytorch_model.bin from cache at /home/mfuntowicz/.cache/torch/transformers/d8f11f061e407be64c4d5d7867ee61d1465263e24085cfa26abf183fdc830569.3fadbea36[...]<jupyter_text>Benchmarking PyTorch & ONNX on CPU_**Disclaimer: results may vary from the actual hardware used to run the model**_<jupyter_code>PROVIDERS = {
("CPUExecutionProvider", "ONNX CPU"),
# Uncomment this line to enable GPU benchmarking
# ("CUDAExecutionProvider", "ONNX GPU")
}
for provider, label in PROVIDERS:
# Create the model with the specified provider
model = create_model_for_provider("onnx/bert-base-cased.onnx", provider)
# Keep track of the inference time
time_buffer = []
# Warm up the model
model.run(None, inputs_onnx)
# Compute
for _ in trange(100, desc=f"Tracking inference time on {provider}"):
with track_infer_time(time_buffer):
model.run(None, inputs_onnx)
# Store the result
results[label] = OnnxInferenceResult(
time_buffer,
model.get_session_options().optimized_model_filepath
)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
# Compute average inference time + std
time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()}
time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000
plt.rcdefaults()
fig, ax = plt.subplots(figsize=(16, 12))
ax.set_ylabel("Avg Inference time (ms)")
ax.set_title("Average inference time (ms) for each provider")
ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std)
plt.show()<jupyter_output><empty_output><jupyter_text>Quantization support from transformersQuantization enables the use of integer (_instead of floating point_) arithmetic to run neural network models faster. From a high-level point of view, quantization works by mapping the float32 range of values to int8 with as little loss in model performance as possible.Hugging Face provides a conversion tool as part of the transformers repository to easily export quantized models to ONNX Runtime. For more information, please refer to the following: - [Hugging Face Documentation on ONNX Runtime quantization support](https://huggingface.co/transformers/master/serialization.htmlquantization)- [Intel's Explanation of Quantization](https://nervanasystems.github.io/distiller/quantization.html)With this method, the accuracy of the model remains at the same level as the full-precision model. If you want to see benchmarks on model performance, we recommend reading the [ONNX Runtime notebook](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/Bert-GLUE_OnnxRuntime_quantization.ipynb) on the subject. Benchmarking PyTorch quantized model<jupyter_code>import torch
# Quantize
model_pt_quantized = torch.quantization.quantize_dynamic(
model_pt.to("cpu"), {torch.nn.Linear}, dtype=torch.qint8
)
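# (Illustrative addition) compare the on-disk size of the float32 model and the dynamically
# quantized one by serializing their state dicts to a temporary directory.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    torch.save(model_pt.state_dict(), os.path.join(tmp_dir, "fp32.pt"))
    torch.save(model_pt_quantized.state_dict(), os.path.join(tmp_dir, "int8.pt"))
    fp32_mb = os.path.getsize(os.path.join(tmp_dir, "fp32.pt")) / 1e6
    int8_mb = os.path.getsize(os.path.join(tmp_dir, "int8.pt")) / 1e6
    print(f"fp32 state dict: {fp32_mb:.1f} MB, int8 state dict: {int8_mb:.1f} MB")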
# Warm up
model_pt_quantized(**model_inputs)
# Benchmark PyTorch quantized model
time_buffer = []
for _ in trange(100):
with track_infer_time(time_buffer):
model_pt_quantized(**model_inputs)
results["PyTorch CPU Quantized"] = OnnxInferenceResult(
time_buffer,
None
)<jupyter_output>100%|██████████| 100/100 [00:01<00:00, 90.15it/s]<jupyter_text>Benchmarking ONNX quantized model<jupyter_code>from transformers.convert_graph_to_onnx import quantize
# Transformers allow you to easily convert float32 model to quantized int8 with ONNX Runtime
quantized_model_path = quantize(Path("bert.opt.onnx"))
# Then you just have to load through ONNX runtime as you would normally do
quantized_model = create_model_for_provider(quantized_model_path.as_posix(), "CPUExecutionProvider")
# Warm up the overall model to have a fair comparison
outputs = quantized_model.run(None, inputs_onnx)
# Evaluate performances
time_buffer = []
for _ in trange(100, desc=f"Tracking inference time on CPUExecutionProvider with quantized model"):
with track_infer_time(time_buffer):
outputs = quantized_model.run(None, inputs_onnx)
# Store the result
results["ONNX CPU Quantized"] = OnnxInferenceResult(
time_buffer,
quantized_model_path
)<jupyter_output>As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint.
This limitation will be removed in the next release of onnxruntime.
Quantized model has been written at bert.onnx: ✔<jupyter_text>Show the inference performance of each providers<jupyter_code>%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
# Compute average inference time + std
time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()}
time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000
plt.rcdefaults()
fig, ax = plt.subplots(figsize=(16, 12))
ax.set_ylabel("Avg Inference time (ms)")
ax.set_title("Average inference time (ms) for each provider")
ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std)
plt.show()<jupyter_output><empty_output> | notebooks/examples/onnx-export.ipynb/0 | {
"file_path": "notebooks/examples/onnx-export.ipynb",
"repo_id": "notebooks",
"token_count": 6241
} | 149 |
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install the most recent versions of 🤗 Transformers and 🤗 Datasets. We will also need `scipy` and `scikit-learn` for some of the metrics. Uncomment the following cell and run it.<jupyter_code>#! pip install transformers
#! pip install datasets
#! pip install scipy scikit-learn
#! pip install huggingface_hub<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has the latest versions of those libraries installed.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow:First you have to create an access token on the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your token.<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email:<jupyter_code># !apt install git-lfs
# !git config --global user.email "[email protected]"
# !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.16.0 since the functionality was introduced in that version:<jupyter_code>import transformers
print(transformers.__version__)<jupyter_output>4.22.0.dev0<jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry
send_example_telemetry("text_classification_notebook", framework="tensorflow")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a text classification task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a text classification task of the [GLUE Benchmark](https://gluebenchmark.com/).The GLUE Benchmark is a group of nine classification tasks on sentences or pairs of sentences which are:- [CoLA](https://nyu-mll.github.io/CoLA/) (Corpus of Linguistic Acceptability) Determine if a sentence is grammatically correct or not.is a dataset containing sentences labeled grammatically correct or not.- [MNLI](https://arxiv.org/abs/1704.05426) (Multi-Genre Natural Language Inference) Determine if a sentence entails, contradicts or is unrelated to a given hypothesis. (This dataset has two versions, one with the validation and test set coming from the same distribution, another called mismatched where the validation and test use out-of-domain data.)- [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) (Microsoft Research Paraphrase Corpus) Determine if two sentences are paraphrases from one another or not.- [QNLI](https://rajpurkar.github.io/SQuAD-explorer/) (Question-answering Natural Language Inference) Determine if the answer to a question is in the second sentence or not. (This dataset is built from the SQuAD dataset.)- [QQP](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs) (Quora Question Pairs2) Determine if two questions are semantically equivalent or not.- [RTE](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment) (Recognizing Textual Entailment) Determine if a sentence entails a given hypothesis or not.- [SST-2](https://nlp.stanford.edu/sentiment/index.html) (Stanford Sentiment Treebank) Determine if the sentence has a positive or negative sentiment.- [STS-B](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) (Semantic Textual Similarity Benchmark) Determine the similarity of two sentences with a score from 1 to 5.- [WNLI](https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html) (Winograd Natural Language Inference) Determine if a sentence with an anonymous pronoun and a sentence with this pronoun replaced are entailed or not. (This dataset is built from the Winograd Schema Challenge dataset.)We will see how to easily load the dataset for each one of those tasks and use Keras to fine-tune a model on it. Each task is named by its acronym, with `mnli-mm` standing for the mismatched version of MNLI (a task with the same training set as `mnli` but different validation and test sets):<jupyter_code>GLUE_TASKS = [
"cola",
"mnli",
"mnli-mm",
"mrpc",
"qnli",
"qqp",
"rte",
"sst2",
"stsb",
"wnli",
]<jupyter_output><empty_output><jupyter_text>This notebook is built to run on any of the tasks in the list above, with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a classification head. Depending on your model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set these three parameters, then the rest of the notebook should run smoothly:<jupyter_code>task = "cola"
model_checkpoint = "distilbert-base-uncased"
batch_size = 16<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and the [🤗 Evaluate](https://github.com/huggingface/evaluate) library to get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the `load_dataset` function from `datasets` and the `load` function from `evaluate`.<jupyter_code>from datasets import load_dataset
from evaluate import load<jupyter_output><empty_output><jupyter_text>With the exception of `mnli-mm`, we can directly pass our task name to those functions. `load_dataset` will cache the dataset to avoid downloading it again the next time you run this cell.<jupyter_code>actual_task = "mnli" if task == "mnli-mm" else task
dataset = load_dataset("glue", actual_task)
metric = load("glue", actual_task)<jupyter_output>Reusing dataset glue (/home/matt/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)<jupyter_text>The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set (with more keys for the mismatched validation and test set in the special case of `mnli`).<jupyter_code>dataset<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>dataset["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(dataset["train"])<jupyter_output><empty_output><jupyter_text>The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Metric):<jupyter_code>metric<jupyter_output><empty_output><jupyter_text>You can call its `compute` method with your predictions and labels directly and it will return a dictionary with the metric(s) value:<jupyter_code>import numpy as np
fake_preds = np.random.randint(0, 2, size=(64,))
fake_labels = np.random.randint(0, 2, size=(64,))
metric.compute(predictions=fake_preds, references=fake_labels)<jupyter_output><empty_output><jupyter_text>Note that `load` has loaded the proper metric associated to your task, which is:- for CoLA: [Matthews Correlation Coefficient](https://en.wikipedia.org/wiki/Matthews_correlation_coefficient)- for MNLI (matched or mismatched): Accuracy- for MRPC: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for QNLI: Accuracy- for QQP: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for RTE: Accuracy- for SST-2: Accuracy- for STS-B: [Pearson Correlation Coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) and [Spearman's_Rank_Correlation_Coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)- for WNLI: Accuracyso the metric object only computes the one(s) needed for your task. Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this is a sentence!", "And this sentence goes with it.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.To preprocess our dataset, we will thus need the names of the columns containing the sentence(s). The following dictionary keeps track of the correspondence task to column names:<jupyter_code>task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mnli-mm": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}<jupyter_output><empty_output><jupyter_text>We can double check it does work on our current dataset:<jupyter_code>sentence1_key, sentence2_key = task_to_keys[task]
if sentence2_key is None:
print(f"Sentence: {dataset['train'][0][sentence1_key]}")
else:
print(f"Sentence 1: {dataset['train'][0][sentence1_key]}")
print(f"Sentence 2: {dataset['train'][0][sentence2_key]}")<jupyter_output>Sentence: Our friends won't buy this analysis, let alone the next one we propose.<jupyter_text>We can them write the function that will preprocess our samples. We just feed them to the `tokenizer` with the arguments `truncation=True` and `padding='longest`. This will ensure that an input longer that what the model selected can handle will be truncated to the maximum length accepted by the model, and all inputs will be padded to the maximum input length to give us a single input array. A more performant method that reduces the number of padding tokens is to write a generator or `tf.data.Dataset` to only pad each *batch* to the maximum length in that batch, but most GLUE tasks are relatively quick on modern GPUs either way.<jupyter_code>def preprocess_function(examples):
if sentence2_key is None:
return tokenizer(examples[sentence1_key], truncation=True)
return tokenizer(examples[sentence1_key], examples[sentence2_key], truncation=True)<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>preprocess_function(dataset["train"][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>pre_tokenizer_columns = set(dataset["train"].features)
encoded_dataset = dataset.map(preprocess_function, batched=True)
tokenizer_columns = list(set(encoded_dataset["train"].features) - pre_tokenizer_columns)
print("Columns added by tokenizer:", tokenizer_columns)
encoded_dataset["train"].features["label"]<jupyter_output><empty_output><jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about sentence classification, we use the `TFAutoModelForSequenceClassification` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which is always 2, except for STS-B which is a regression problem and MNLI where we have 3 labels).<jupyter_code>from transformers import TFAutoModelForSequenceClassification
import tensorflow as tf
if task == "stsb":
    num_labels = 1
elif task.startswith("mnli"):
    num_labels = 3
else:
    num_labels = 2
# This next little bit is optional, but will give us cleaner label outputs later
# If you're using a task other than CoLA, you will probably need to change these
# to match the label names for your task!
id2label = {0: "Invalid", 1: "Valid"}
label2id = {val: key for key, val in id2label.items()}
model = TFAutoModelForSequenceClassification.from_pretrained(
model_checkpoint, num_labels=num_labels, id2label=id2label, label2id=label2id
)<jupyter_output>2022-08-03 13:07:25.935388: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: forward compatibility was attempted on non supported HW
2022-08-03 13:07:25.935426: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: matt-TRX40-AORUS-PRO-WIFI
2022-08-03 13:07:25.935434: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: matt-TRX40-AORUS-PRO-WIFI
2022-08-03 13:07:25.935556: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 470.141.3
2022-08-03 13:07:25.935580: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 470.129.6
2022-08-03 13:07:25.935586: E tensorflow/stream_executor/cuda/cuda_diagnostics.cc:313] kernel version 470.129.6 does not match DSO version 470.141.3 -- cannot find working devices in this configuration
2022-08-03 13:07:25.935836: I tensorflow/core/platform/cpu[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some others (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do.Next, we convert our datasets to `tf.data.Dataset`, which Keras understands natively. There are two ways to do this - we can use the slightly more low-level [`Dataset.to_tf_dataset()`](https://huggingface.co/docs/datasets/package_reference/main_classesdatasets.Dataset.to_tf_dataset) method, or we can use [`Model.prepare_tf_dataset()`](https://huggingface.co/docs/transformers/main_classes/modeltransformers.TFPreTrainedModel.prepare_tf_dataset). The main difference between these two is that the `Model` method can inspect the model to determine which column names it can use as input, which means you don't need to specify them yourself. Unless our samples are all the same length, we will also need to pass a `tokenizer` or `collate_fn` so that the `tf.data.Dataset` knows how to pad and combine samples into a batch.<jupyter_code>validation_key = (
"validation_mismatched"
if task == "mnli-mm"
else "validation_matched"
if task == "mnli"
else "validation"
)
tf_train_dataset = model.prepare_tf_dataset(
encoded_dataset["train"],
shuffle=True,
batch_size=16,
tokenizer=tokenizer
)
tf_validation_dataset = model.prepare_tf_dataset(
encoded_dataset[validation_key],
shuffle=False,
batch_size=16,
tokenizer=tokenizer,
)<jupyter_output><empty_output><jupyter_text>Next, we need to set up our optimizer and `compile()` our model. The `create_optimizer` function in the Transformers library creates a very useful `AdamW` optimizer with weight and learning rate decay. This performs very well for training most transformer networks - we recommend using it as your default unless you have a good reason not to! Note, however, that because it decays the learning rate over the course of training, it needs to know how many batches it will see during training.Note that all models in `transformers` can pick a sensible loss function by default. To use this loss, simply do not pass a `loss` argument to `compile()`. Although the losses for GLUE tasks are usually just simple cross-entropy, this can be very helpful in models when the loss is intricate and contains multiple terms.In some of our other examples, we use `jit_compile` to compile the model with [XLA](https://www.tensorflow.org/xla). In this case, we should be careful about that - because our inputs have variable sequence lengths, we may end up having to do a new XLA compilation for each possible length, because XLA compilation expects a static input shape! For small datasets, this will probably result in spending more time on XLA compilation than actually training, which isn't very helpful.If you really want to use XLA without these problems (for example, if you're training on TPU), you can create a tokenizer with `padding="max_length"`. This will pad all of your samples to the same length, ensuring that a single XLA compilation will suffice for your entire dataset. Note that depending on the nature of your dataset, this may result in a lot of wasted computation on padding tokens!<jupyter_code>from transformers import create_optimizer
num_epochs = 3
batches_per_epoch = len(encoded_dataset["train"]) // batch_size
total_train_steps = int(batches_per_epoch * num_epochs)
optimizer, schedule = create_optimizer(
init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps
)
model.compile(optimizer=optimizer)<jupyter_output>No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>The last thing to define is how to compute the metrics from the predictions. We need to define a function for this, which will just use the `metric` we loaded earlier. The only preprocessing we have to do is to take the argmax of our predicted logits (or just squeeze the last axis in the case of STS-B).In addition, let's wrap this metric computation function in a `KerasMetricCallback`. This callback will compute the metric on the validation set each epoch, including printing it and logging it for other callbacks like `TensorBoard` and `EarlyStopping`.Why do it this way, though, and not just use a straightforward Keras Metric object? This is a good question - on this task, several of the metrics such as Accuracy are very straightforward, and it would probably make more sense to just use a Keras metric for those instead. However, we want to demonstrate the use of `KerasMetricCallback` here, because it can handle any arbitrary Python function for the metric computation. This turns out to be very important for other NLP tasks like summarization and translation, where standard metrics like `BLEU` and `ROUGE` are much more complex to compute, and often involve decoding tokens generated by the model to strings and comparing their similarity to target sentences. If you want to stop training once `ROUGE` scores on the validation set start to decline, then `KerasMetricCallback` is essential.That said, if you're only interested in tasks like text classification with straightforward metrics, then by all means remove the `KerasMetricCallback` and use a Keras `Accuracy` metric instead! With that out of the way, how do we actually use `KerasMetricCallback`? It's straightforward: We simply define a function that computes metrics given a tuple of numpy arrays of predictions and labels, then we pass that, along with the validation set to compute metrics on, to the callback:<jupyter_code>from transformers.keras_callbacks import KerasMetricCallback
def compute_metrics(eval_predictions):
predictions, labels = eval_predictions
if task != "stsb":
predictions = np.argmax(predictions, axis=1)
else:
predictions = predictions[:, 0]
return metric.compute(predictions=predictions, references=labels)
metric_callback = KerasMetricCallback(
metric_fn=compute_metrics, eval_dataset=tf_validation_dataset
)<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `fit` method. Be sure to pass the TF datasets, and not the original datasets! We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! Make sure to change the `username` if you do. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.<jupyter_code>from transformers.keras_callbacks import PushToHubCallback
from tensorflow.keras.callbacks import TensorBoard
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-{task}"
tensorboard_callback = TensorBoard(log_dir="./text_classification_model_save/logs")
push_to_hub_callback = PushToHubCallback(
output_dir="./text_classification_model_save",
tokenizer=tokenizer,
hub_model_id=push_to_hub_model_id,
)
callbacks = [metric_callback, tensorboard_callback, push_to_hub_callback]
model.fit(
tf_train_dataset,
validation_data=tf_validation_dataset,
epochs=num_epochs,
callbacks=callbacks,
)<jupyter_output>/home/matt/PycharmProjects/notebooks/examples/text_classification_model_save is already a clone of https://huggingface.co/Rocketknight1/distilbert-base-uncased-finetuned-cola. Make sure you pull the latest changes with `repo.git_pull()`.<jupyter_text>To see how your model fared you can compare it to the [GLUE Benchmark leaderboard](https://gluebenchmark.com/leaderboard). If you used the callback above, you can now share this model with all your friends, family or favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```pythonfrom transformers import TFAutoModelForSequenceClassificationmodel = TFAutoModelForSequenceClassification.from_pretrained("your-username/my-awesome-model")``` Inference Training a model is fun, but once it's trained you usually want to use it to get predictions on new data. Let's take a look at how to do that. Firstly, we'll load our trained model from the hub - this lets us resume the code from here without needing to rerun all the training above every time.<jupyter_code>from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
# You can, of course, use your own username and model name here once you've pushed your model using the code above!
model = TFAutoModelForSequenceClassification.from_pretrained("Rocketknight1/distilbert-base-uncased-finetuned-cola")
tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/distilbert-base-uncased-finetuned-cola")<jupyter_output><empty_output><jupyter_text>Now, let's make up some sentences and see if the model can classify them properly! The first sentence is valid English, but the second one makes a grammatical mistake.<jupyter_code>sentences = [
"The judge told the jurors to think carefully.",
"The judge told that the jurors to think carefully."
]<jupyter_output><empty_output><jupyter_text>To feed them into our model, we'll need to tokenize them and then get our model's predictions:<jupyter_code>tokenized = tokenizer(sentences, return_tensors="np", padding="longest")
outputs = model(tokenized).logits
classifications = np.argmax(outputs, axis=1)
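# (Optional, illustrative; assumes `tensorflow` is imported as `tf`, as earlier in this notebook)
# the raw logits can also be turned into class probabilities:
# probabilities = tf.nn.softmax(outputs, axis=-1).numpy()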
print(classifications)<jupyter_output>[1 0]<jupyter_text>What do those label values mean? Let's use the `id2label` property set on our model to make them a little more comprehensible:<jupyter_code>classifications = [model.config.id2label[output] for output in classifications]
print(classifications)<jupyter_output>['Valid', 'Invalid']<jupyter_text>Looks right to me! Pipeline API An alternative way to quickly perform inference with any model on the hub is to use the [Pipeline API](https://huggingface.co/docs/transformers/main_classes/pipelines), which abstracts away all the steps we did manually above. It will perform the preprocessing, forward pass and postprocessing all in a single object.Let's showcase this for our trained model:<jupyter_code>from transformers import pipeline
classifier = pipeline("text-classification", "Rocketknight1/distilbert-base-uncased-finetuned-cola", framework="tf")
classifier(sentences)<jupyter_output><empty_output> | notebooks/examples/text_classification-tf.ipynb/0 | {
"file_path": "notebooks/examples/text_classification-tf.ipynb",
"repo_id": "notebooks",
"token_count": 8177
} | 150 |
<jupyter_start><jupyter_text><jupyter_code>!pip install transformers
!sudo apt-get install git-lfs
!git config --global user.email "[email protected]"
!git config --global user.name "Julien Chaumond"
!transformers-cli login
!pwd
!transformers-cli repo create policy-distilbert-7d
!git clone https://julien-c:[email protected]/julien-c/policy-distilbert-7d
!ls -al
%cd policy-distilbert-7d
!wget https://huggingface.co/distilbert-base-uncased/resolve/main/config.json
!wget https://huggingface.co/distilbert-base-uncased/resolve/main/pytorch_model.bin
!git lfs install
!git add .
!git commit -m "from Google Colab"
!git log
!git push<jupyter_output>Git LFS: (1 of 1 files) 255.55 MB / 255.55 MB
Counting objects: 4, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (4/4), done.
Writing objects: 100% (4/4), 712 bytes | 712.00 KiB/s, done.
Total 4 (delta 0), reused 0 (delta 0)
To https://huggingface.co/julien-c/policy-distilbert-7d
4630180..41a7c98 main -> main<jupyter_text>Check out resulting commit: https://huggingface.co/julien-c/policy-distilbert-7d/commit/41a7c98f1285a7e5ef19095dab11f0ac71ac1406<jupyter_code><jupyter_output><empty_output> | notebooks/huggingface_hub/upload_hf_model.ipynb/0 | {
"file_path": "notebooks/huggingface_hub/upload_hf_model.ipynb",
"repo_id": "notebooks",
"token_count": 478
} | 151 |
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Distributed Training Demo for `TensorFlow` Distributed Data Parallelism with `transformers` and `tensorflow` 1. [Introduction](Introduction) 2. [Development Environment and Permissions](Development-Environment-and-Permissions) 1. [Installation](Installation) 2. [Development environment](Development-environment) 3. [Permissions](Permissions)3. [Processing](Preprocessing) 1. [Tokenization](Tokenization) 2. [Uploading data to sagemaker_session_bucket](Uploading-data-to-sagemaker_session_bucket) 4. [Fine-tuning & starting Sagemaker Training Job](Fine-tuning-\&-starting-Sagemaker-Training-Job) 1. [Creating an Estimator and start a training job](Creating-an-Estimator-and-start-a-training-job) 2. [Estimator Parameters](Estimator-Parameters) 3. [Download fine-tuned model from s3](Download-fine-tuned-model-from-s3) 3. [Attach to old training job to an estimator ](Attach-to-old-training-job-to-an-estimator) 5. [_Coming soon_:Push model to the Hugging Face hub](Push-model-to-the-Hugging-Face-hub) IntroductionWelcome to our distributed end-to-end binary Text-Classification example. In this demo, we will use the Hugging Face `transformers` and `datasets` libraries together with a custom Amazon sagemaker-sdk extension to fine-tune a pre-trained transformer on binary text classification. In particular, the pre-trained model will be fine-tuned using the `imdb` dataset. To speed up training we are going to use the SageMaker distributed data parallel library to run our training distributed across multiple GPUs. To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. _**NOTE: You can run this demo in Sagemaker Studio, your local machine or Sagemaker Notebook Instances**_ Development Environment and Permissions Installation_*Note:* we only install the required libraries from Hugging Face and AWS. You also need PyTorch or TensorFlow, if you haven't installed it already_<jupyter_code>!pip install "sagemaker>=2.48.0" --upgrade<jupyter_output><empty_output><jupyter_text>Development environment **upgrade ipywidgets for `datasets` library and restart kernel, only needed when preprocessing is done in the notebook**<jupyter_code>%%capture
import IPython
!conda install -c conda-forge ipywidgets -y
IPython.Application.instance().kernel.do_shutdown(True) # has to restart kernel so changes are used
import sagemaker.huggingface<jupyter_output><empty_output><jupyter_text>Permissions _If you are going to use Sagemaker in a local environment, you need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it not exists
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sess.default_bucket()}")
print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>PreprocessingIn this example the preproccsing will be done in the `train.py` when executing the script. You could also move the `preprocessing` outside of the script and upload the data to s3 and pass it into it. Fine-tuning & starting Sagemaker Training JobIn order to create a sagemaker training job we need an `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. In a Estimator we define, which fine-tuning script should be used as `entry_point`, which `instance_type` should be used, which `hyperparameters` are passed in .....```pythonhuggingface_estimator = HuggingFace(entry_point='train.py', source_dir='./scripts', base_job_name='huggingface-sdk-extension', instance_type='ml.p3.2xlarge', instance_count=1, transformers_version='4.4', pytorch_version='1.6', py_version='py37', role=role, hyperparameters = {'epochs': 1, 'train_batch_size': 32, 'model_name':'distilbert-base-uncased' })```When we create a SageMaker training job, SageMaker takes care of starting and managing all the required ec2 instances for us with the `huggingface` container, uploads the provided fine-tuning script `train.py` and downloads the data from our `sagemaker_session_bucket` into the container at `/opt/ml/input/data`. Then, it starts the training job by running. ```python/opt/conda/bin/python train.py --epochs 1 --model_name distilbert-base-uncased --train_batch_size 32```The `hyperparameters` you define in the `HuggingFace` estimator are passed in as named arguments. Sagemaker is providing useful properties about the training environment through various environment variables, including the following:* `SM_MODEL_DIR`: A string that represents the path where the training job writes the model artifacts to. After training, artifacts in this directory are uploaded to S3 for model hosting.* `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host.* `SM_CHANNEL_XXXX:` A string that represents the path to the directory that contains the input data for the specified channel. For example, if you specify two input channels in the HuggingFace estimator’s fit call, named `train` and `test`, the environment variables `SM_CHANNEL_TRAIN` and `SM_CHANNEL_TEST` are set.To run your training job locally you can define `instance_type='local'` or `instance_type='local_gpu'` for gpu usage. _Note: this does not working within SageMaker Studio_<jupyter_code>!pygmentize ./scripts/train.py<jupyter_output>[34mimport[39;49;00m [04m[36margparse[39;49;00m
[34mimport[39;49;00m [04m[36mlogging[39;49;00m
[34mimport[39;49;00m [04m[36mos[39;49;00m
[34mimport[39;49;00m [04m[36msys[39;49;00m
[34mimport[39;49;00m [04m[36mtensorflow[39;49;00m [34mas[39;49;00m [04m[36mtf[39;49;00m
[34mfrom[39;49;00m [04m[36mdatasets[39;49;00m [34mimport[39;49;00m load_dataset
[34mfrom[39;49;00m [04m[36mtqdm[39;49;00m [34mimport[39;49;00m tqdm
[34mfrom[39;49;00m [04m[36mtransformers[39;49;00m [34mimport[39;49;00m AutoTokenizer, TFAutoModelForSequenceClassification
[34mfrom[39;49;00m [04m[36mtransformers[39;49;00m[04m[36m.[39;49;00m[04m[36mfile_utils[39;49;00m [34mimport[39;49;00m is_sagemaker_distributed_available
[34mif[39;49;00m os.environ.get([33m"[39;49;00m[33mSDP_ENABLED[39;49;00m[33m"[39;49;00m) [35mor[39;49;00m is_sagemaker_distributed_available():
SDP_ENABLED = [34mTrue[39;49;00m
os.environ[[33m"[39;49;00m[33[...]<jupyter_text>Creating an Estimator and start a training job<jupyter_code>from sagemaker.huggingface import HuggingFace
# hyperparameters, which are passed into the training job
hyperparameters={
'epochs': 1,
'train_batch_size': 16,
'model_name':'distilbert-base-uncased',
}
# configuration for running training on smdistributed Data Parallel
distribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}}
# instance configurations
instance_type='ml.p3dn.24xlarge'
instance_count=2
volume_size=200
huggingface_estimator = HuggingFace(
entry_point='train.py',
source_dir='./scripts',
instance_type=instance_type,
instance_count=instance_count,
role=role,
transformers_version='4.6',
tensorflow_version='2.4',
py_version='py37',
distribution=distribution,
hyperparameters=hyperparameters,
debugger_hook_config=False, # currently needed
)
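# Note: fit() below starts training without explicit input channels because the data is
# loaded inside train.py. If you uploaded preprocessed data to S3 instead, you could pass
# it as channels (the bucket paths below are placeholders, shown only for illustration):
# huggingface_estimator.fit({"train": "s3://<bucket>/train", "test": "s3://<bucket>/test"})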
huggingface_estimator.fit()<jupyter_output><empty_output><jupyter_text>Deploying the endpointTo deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type.<jupyter_code>predictor = huggingface_estimator.deploy(1,"ml.g4dn.xlarge")<jupyter_output><empty_output><jupyter_text>Then, we use the returned predictor object to call the endpoint.<jupyter_code>sentiment_input= {"inputs":"I love using the new Inference DLC."}
predictor.predict(sentiment_input)<jupyter_output><empty_output><jupyter_text>Finally, we delete the endpoint again.<jupyter_code>predictor.delete_endpoint()<jupyter_output><empty_output><jupyter_text>Extras Estimator Parameters<jupyter_code># container image used for training job
print(f"container image used for training job: \n{huggingface_estimator.image_uri}\n")
# s3 uri where the trained model is located
print(f"s3 uri where the trained model is located: \n{huggingface_estimator.model_data}\n")
# latest training job name for this estimator
print(f"latest training job name for this estimator: \n{huggingface_estimator.latest_training_job.name}\n")
# access the logs of the training job
huggingface_estimator.sagemaker_session.logs_for_job(huggingface_estimator.latest_training_job.name)<jupyter_output><empty_output><jupyter_text>Attach to old training job to an estimator In Sagemaker you can attach an old training job to an estimator to continue training, get results etc..<jupyter_code>from sagemaker.estimator import Estimator
# job which is going to be attached to the estimator
old_training_job_name=''
# attach old training job
huggingface_estimator_loaded = Estimator.attach(old_training_job_name)
# get model output s3 from training job
huggingface_estimator_loaded.model_data<jupyter_output><empty_output> | notebooks/sagemaker/07_tensorflow_distributed_training_data_parallelism/sagemaker-notebook.ipynb/0 | {
"file_path": "notebooks/sagemaker/07_tensorflow_distributed_training_data_parallelism/sagemaker-notebook.ipynb",
"repo_id": "notebooks",
"token_count": 3614
} | 152 |
<jupyter_start><jupyter_text>Accelerate BERT Inference with Hugging Face Transformers and AWS Inferentia In this end-to-end tutorial, you will learn how to speed up BERT inference for text classification with Hugging Face Transformers, Amazon SageMaker, and AWS Inferentia. You will learn how to: 1. Convert your Hugging Face Transformer to AWS Neuron (Inferentia)2. Create a custom `inference.py` script for `text-classification`3. Create and upload the neuron model and inference script to Amazon S34. Deploy a Real-time Inference Endpoint on Amazon SageMaker5. Run and evaluate Inference performance of BERT on InferentiaLet's get started! 🚀---*If you are going to use Sagemaker in a local environment (not SageMaker Studio or Notebook Instances), you need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it.* 1. Convert your Hugging Face Transformer to AWS NeuronWe are going to use the [AWS Neuron SDK for AWS Inferentia](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html). The Neuron SDK includes a deep learning compiler, runtime, and tools for converting and compiling PyTorch and TensorFlow models to Neuron-compatible models, which can be run on [EC2 Inf1 instances](https://aws.amazon.com/ec2/instance-types/inf1/). As a first step, we need to install the [Neuron SDK](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-intro/neuron-install-guide.html) and the required packages.*Tip: If you are using Amazon SageMaker Notebook Instances or Studio you can go with the `conda_python3` conda kernel.*<jupyter_code># Set Pip repository to point to the Neuron repository
!pip config set global.extra-index-url https://pip.repos.neuron.amazonaws.com
# Install Neuron PyTorch
!pip install torch-neuron==1.9.1.* neuron-cc[tensorflow] sagemaker>=2.79.0 transformers==4.12.3 --upgrade<jupyter_output><empty_output><jupyter_text>After we have installed the Neuron SDK we can load and convert our model. Neuron models are converted using `torch_neuron` with its `trace` method similar to `torchscript`. You can find more information in our [documentation](https://huggingface.co/docs/transformers/serializationtorchscript).To be able to convert our model we first need to select the model we want to use for our text classification pipeline from [hf.co/models](http://hf.co/models). For this example let's go with [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) but this can be easily adjusted with other BERT-like models.<jupyter_code>model_id = "distilbert-base-uncased-finetuned-sst-2-english"<jupyter_output><empty_output><jupyter_text>At the time of writing, the [AWS Neuron SDK does not support dynamic shapes](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.htmldynamic-shapes), which means that the input size needs to be static for compiling and inference. In simpler terms, this means that if the model is compiled with an input of batch size 1 and sequence length 16, it can only run inference on inputs with that same shape._When using a `t2.medium` instance the compiling takes around 2-3 minutes_<jupyter_code>import os
import tensorflow # to workaround a protobuf version conflict issue
import torch
import torch.neuron
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, torchscript=True)
# create dummy input for max length 128
dummy_input = "dummy input which will be padded later"
max_length = 128
embeddings = tokenizer(dummy_input, max_length=max_length, padding="max_length",return_tensors="pt")
neuron_inputs = tuple(embeddings.values())
# compile model with torch.neuron.trace and update config
model_neuron = torch.neuron.trace(model, neuron_inputs)
model.config.update({"traced_sequence_length": max_length})
# save tokenizer, neuron model and config for later use
save_dir="tmp"
os.makedirs("tmp",exist_ok=True)
model_neuron.save(os.path.join(save_dir,"neuron_model.pt"))
tokenizer.save_pretrained(save_dir)
model.config.save_pretrained(save_dir)<jupyter_output>Couldn't call 'get_role' to get Role ARN from role name philippschmid to get Role path.<jupyter_text>2. Create a custom inference.py script for text-classificationThe [Hugging Face Inference Toolkit](https://github.com/aws/sagemaker-huggingface-inference-toolkit) supports zero-code deployments on top of the [pipeline feature](https://huggingface.co/transformers/main_classes/pipelines.html) from 🤗 Transformers. This allows users to deploy Hugging Face transformers without an inference script [[Example](https://github.com/huggingface/notebooks/blob/master/sagemaker/11_deploy_model_from_hf_hub/deploy_transformer_model_from_hf_hub.ipynb)]. Currently, this feature is not supported with AWS Inferentia, which means we need to provide an `inference.py` for running inference. *If you would be interested in support for zero-code deployments for Inferentia, let us know on the [forum](https://discuss.huggingface.co/c/sagemaker/17).*---To use the inference script, we need to create an `inference.py` script. In our example, we are going to overwrite the `model_fn` to load our neuron model and the `predict_fn` to create a text-classification pipeline. If you want to know more about the `inference.py` script, check out this [example](https://github.com/huggingface/notebooks/blob/master/sagemaker/17_custom_inference_script/sagemaker-notebook.ipynb). It explains, amongst other things, what the `model_fn` and `predict_fn` are.<jupyter_code>!mkdir code<jupyter_output><empty_output><jupyter_text>We are using `NEURON_RT_NUM_CORES=1` to make sure that each HTTP worker uses 1 Neuron core to maximize throughput.<jupyter_code>%%writefile code/inference.py
import os
from transformers import AutoConfig, AutoTokenizer
import torch
import torch.neuron
# To use one neuron core per worker
os.environ["NEURON_RT_NUM_CORES"] = "1"
# saved weights name
AWS_NEURON_TRACED_WEIGHTS_NAME = "neuron_model.pt"
def model_fn(model_dir):
# load tokenizer and neuron model from model_dir
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = torch.jit.load(os.path.join(model_dir, AWS_NEURON_TRACED_WEIGHTS_NAME))
model_config = AutoConfig.from_pretrained(model_dir)
return model, tokenizer, model_config
def predict_fn(data, model_tokenizer_model_config):
# destruct model, tokenizer and model config
model, tokenizer, model_config = model_tokenizer_model_config
# create embeddings for inputs
inputs = data.pop("inputs", data)
embeddings = tokenizer(
inputs,
return_tensors="pt",
max_length=model_config.traced_sequence_length,
padding="max_length",
truncation=True,
)
# convert to tuple for neuron model
neuron_inputs = tuple(embeddings.values())
    # run prediction
with torch.no_grad():
predictions = model(*neuron_inputs)[0]
scores = torch.nn.Softmax(dim=1)(predictions)
    # return dictionary, which will be JSON serializable
return [{"label": model_config.id2label[item.argmax().item()], "score": item.max().item()} for item in scores]<jupyter_output>Overwriting code/inference.py<jupyter_text>3. Create and upload the neuron model and inference script to Amazon S3Before we can deploy our neuron model to Amazon SageMaker we need to create a `model.tar.gz` archive with all our model artifacts saved into `tmp/`, e.g. `neuron_model.pt` and upload this to Amazon S3.To do this we need to set up our permissions.<jupyter_code>import sagemaker
import boto3
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it does not exist
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sess.default_bucket()}")
print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>Next, we create our `model.tar.gz`.The `inference.py` script will be placed into a `code/` folder.<jupyter_code># copy inference.py into the code/ directory of the model directory.
!cp -r code/ tmp/code/
# create a model.tar.gz archive with all the model artifacts and the inference.py script.
%cd tmp
!tar zcvf model.tar.gz *
%cd ..<jupyter_output><empty_output><jupyter_text>Now we can upload our `model.tar.gz` to our session S3 bucket with `sagemaker`.<jupyter_code>from sagemaker.s3 import S3Uploader
# create s3 uri
s3_model_path = f"s3://{sess.default_bucket()}/{model_id}"
# upload model.tar.gz
s3_model_uri = S3Uploader.upload(local_path="tmp/model.tar.gz",desired_s3_uri=s3_model_path)
print(f"model artifacts uploaded to {s3_model_uri}")<jupyter_output><empty_output><jupyter_text>4. Deploy a Real-time Inference Endpoint on Amazon SageMakerAfter we have uploaded our `model.tar.gz` to Amazon S3, we can create a custom `HuggingFaceModel`. This class will be used to create and deploy our real-time inference endpoint on Amazon SageMaker.<jupyter_code>from sagemaker.huggingface.model import HuggingFaceModel
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
model_data=s3_model_uri, # path to your model and script
role=role, # iam role with permissions to create an Endpoint
transformers_version="4.12", # transformers version used
pytorch_version="1.9", # pytorch version used
py_version='py37', # python version used
)
# Let SageMaker know that we've already compiled the model via neuron-cc
huggingface_model._is_compiled_model = True
# deploy the endpoint
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type="ml.inf1.xlarge" # AWS Inferentia Instance
)<jupyter_output><empty_output><jupyter_text>5. Run and evaluate Inference performance of BERT on InferentiaThe `.deploy()` returns a `HuggingFacePredictor` object which can be used to request inference.<jupyter_code>data = {
"inputs": "the mesmerizing performances of the leads keep the film grounded and keep the audience riveted .",
}
res = predictor.predict(data=data)
res<jupyter_output><empty_output><jupyter_text>We managed to deploy our Neuron-compiled BERT to AWS Inferentia on Amazon SageMaker. Now, let's test its performance. As a dummy load test, we will loop and send 10000 synchronous requests to our endpoint.<jupyter_code># send 10000 requests
for i in range(10000):
resp = predictor.predict(
data={"inputs": "it 's a charming and often affecting journey ."}
)<jupyter_output><empty_output><jupyter_text>Let's inspect the performance in cloudwatch.<jupyter_code>print(f"https://console.aws.amazon.com/cloudwatch/home?region={sess.boto_region_name}#metricsV2:graph=~(metrics~(~(~'AWS*2fSageMaker~'ModelLatency~'EndpointName~'{predictor.endpoint_name}~'VariantName~'AllTraffic))~view~'timeSeries~stacked~false~region~'{sess.boto_region_name}~start~'-PT5M~end~'P0D~stat~'Average~period~30);query=~'*7bAWS*2fSageMaker*2cEndpointName*2cVariantName*7d*20{predictor.endpoint_name}")<jupyter_output><empty_output><jupyter_text>The average latency for our BERT model is `5-6ms` for a sequence length of 128. **Delete model and endpoint**To clean up, we can delete the model and endpoint.<jupyter_code>predictor.delete_model()
predictor.delete_endpoint()<jupyter_output><empty_output> | notebooks/sagemaker/18_inferentia_inference/sagemaker-notebook.ipynb/0 | {
"file_path": "notebooks/sagemaker/18_inferentia_inference/sagemaker-notebook.ipynb",
"repo_id": "notebooks",
"token_count": 3902
} | 153 |
<jupyter_start><jupyter_text>Document AI: Fine-tuning Donut for document-parsing using Hugging Face Transformers on Amazon SageMakerIn this tutorial, you will learn how to fine-tune and deploy [Donut-base](https://huggingface.co/naver-clova-ix/donut-base) for document understanding/document parsing using Hugging Face Transformers and Amazon SageMaker. Donut is a new document-understanding model achieving state-of-the-art performance with an MIT license, which, unlike other models such as LayoutLMv2/LayoutLMv3, allows it to be used for commercial purposes. You will learn how to:1. [Setup Development Environment](1-setup-development-environment)2. [Load SROIE dataset](2-load-sroie-dataset)3. [Preprocess and upload dataset for Donut](3-preprocess-and-upload-dataset-for-donut)4. [Fine-tune Donut model on Amazon SageMaker](4-fine-tune-and-evaluate-donut-model)5. [Deploy Donut model on Amazon SageMaker](5-deploy-donut-model) Quick intro: Document Understanding Transformer (Donut) by ClovaAIDocument Understanding Transformer (Donut) is a new Transformer model for OCR-free document understanding. It doesn't require an OCR engine to process scanned documents, yet it achieves state-of-the-art performance on various visual document understanding tasks, such as visual document classification or information extraction (a.k.a. document parsing). Donut is a multimodal sequence-to-sequence model with a vision encoder ([Swin Transformer](https://huggingface.co/docs/transformers/v4.21.2/en/model_doc/swinoverview)) and text decoder ([BART](https://huggingface.co/docs/transformers/v4.21.2/en/model_doc/bart)). The encoder receives the images and computes them into an embedding, which is then passed to the decoder, which generates a sequence of tokens.* Paper: https://arxiv.org/abs/2111.15664* Official repo: https://github.com/clovaai/donut--- Now we know how Donut works, so let's get started. 🚀 1. Setup Development EnvironmentThe first step is to install the required libraries and set up the environment. We will use the [Amazon SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/) to interact with SageMaker. We will also use [Hugging Face Transformers & Datasets](https://huggingface.co/transformers/) to preprocess the data.<jupyter_code>!pip install "transformers[sentencepiece]==4.26.0" "datasets[s3]==2.9.0" sagemaker --upgrade --quiet<jupyter_output><empty_output><jupyter_text>If you are going to use Sagemaker in a local environment, you need access to an IAM Role with the required permissions for Sagemaker. You can find more about it [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).<jupyter_code>import sagemaker
import boto3
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it does not exist
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sess.default_bucket()}")
print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>2. Load SROIE datasetWe will use the [SROIE](https://github.com/zzzDavid/ICDAR-2019-SROIE) dataset, a collection of 1000 scanned receipts including their OCR; more specifically, we will use the dataset from task 2 "Scanned Receipt OCR". The available dataset on Hugging Face ([darentang/sroie](https://huggingface.co/datasets/darentang/sroie)) is not compatible with Donut. That's why we will use the original dataset together with the `imagefolder` feature of `datasets` to load our dataset. Learn more about loading image data [here](https://huggingface.co/docs/datasets/v2.4.0/en/image_loadload-image-data)._Note: The test data for task 2 is sadly not available, meaning that we end up with only 626 images._First, we will clone the repository, extract the dataset into a separate folder and remove the unnecessary files.<jupyter_code>%%bash
# clone repository
git clone https://github.com/zzzDavid/ICDAR-2019-SROIE.git
# copy data
cp -r ICDAR-2019-SROIE/data ./
# clean up
rm -rf ICDAR-2019-SROIE
rm -rf data/box<jupyter_output><empty_output><jupyter_text>Now we have two folders inside the `data/` directory. One contains the images of the receipts and the other contains the OCR text. The next step is to create a `metadata.json` file that contains the information about the images including the OCR-text. This is necessary for the `imagefolder` feature of `datasets`.At the end, the `metadata.json` should look similar to the example below.```json{"file_name": "0001.png", "text": "This is a golden retriever playing with a ball"}{"file_name": "0002.png", "text": "A german shepherd"}```In our example, the `"text"` column will contain the OCR text of the image, which will later be used for creating the Donut-specific format.<jupyter_code>import json
from pathlib import Path
import shutil
# define paths
base_path = Path("data")
metadata_path = base_path.joinpath("key")
image_path = base_path.joinpath("img")
# define metadata list
metadata_list = []
# parse metadata
for file_name in metadata_path.glob("*.json"):
with open(file_name, "r") as json_file:
# load json file
data = json.load(json_file)
# create "text" column with json string
text = json.dumps(data)
# add to metadata list if image exists
if image_path.joinpath(f"{file_name.stem}.jpg").is_file():
metadata_list.append({"text":text,"file_name":f"{file_name.stem}.jpg"})
# delete json file
# write jsonline file
with open(image_path.joinpath('metadata.jsonl'), 'w') as outfile:
for entry in metadata_list:
json.dump(entry, outfile)
outfile.write('\n')
# remove old meta data
shutil.rmtree(metadata_path)<jupyter_output><empty_output><jupyter_text>Good Job! Now we can load the dataset using the `imagefolder` feature of `datasets`.<jupyter_code>from datasets import load_dataset
dataset = load_dataset("imagefolder", data_dir=image_path, split="train")
print(f"Dataset has {len(dataset)} images")
print(f"Dataset features are: {dataset.features.keys()}")<jupyter_output><empty_output><jupyter_text>Now, lets take a closer look at our dataset<jupyter_code>import random
random_sample = random.randint(0, len(dataset))
print(f"Random sample is {random_sample}")
print(f"OCR text is {dataset[random_sample]['text']}")
dataset[random_sample]['image'].resize((250,400))<jupyter_output>Random sample is 623
OCR text is {"company": "ONE ONE THREE SEAFOOD RESTAURANT SDN BHD", "date": "23-06-2018", "address": "NO.1, TAMAN SRI DENGKIL, JALAN AIR HITAM 43800 DENGKIL, SELANGOR.", "total": "179.50"}<jupyter_text>3. Preprocess and upload dataset for DonutAs we learned in the introduction, Donut is a sequence-to-sequence model with a vision encoder and text decoder. When fine-tuning the model we want it to generate the `"text"` based on the image we pass it. Similar to NLP tasks, we have to tokenize and preprocess the text. Before we can tokenize the text, we need to transform the JSON string into a Donut compatible document. **current JSON string**```json{"company": "ADVANCO COMPANY", "date": "17/01/2018", "address": "NO 1&3, JALAN WANGSA DELIMA 12, WANGSA LINK, WANGSA MAJU, 53300 KUALA LUMPUR", "total": "7.00"}```**Donut document**```jsonADVANCO COMPANY17/01/2018NO 1&3, JALAN WANGSA DELIMA 12, WANGSA LINK, WANGSA MAJU, 53300 KUALA LUMPUR7.00```To easily create those documents the ClovaAI team has created a [json2token](https://github.com/clovaai/donut/blob/master/donut/model.pyL497) method, which we extract and then apply.<jupyter_code>new_special_tokens = [] # new tokens which will be added to the tokenizer
task_start_token = "<s>" # start of task token
eos_token = "</s>" # eos token of tokenizer
def json2token(obj, update_special_tokens_for_json_key: bool = True, sort_json_key: bool = True):
"""
Convert an ordered JSON object into a token sequence
"""
if type(obj) == dict:
if len(obj) == 1 and "text_sequence" in obj:
return obj["text_sequence"]
else:
output = ""
if sort_json_key:
keys = sorted(obj.keys(), reverse=True)
else:
keys = obj.keys()
for k in keys:
if update_special_tokens_for_json_key:
new_special_tokens.append(fr"<s_{k}>") if fr"<s_{k}>" not in new_special_tokens else None
new_special_tokens.append(fr"</s_{k}>") if fr"</s_{k}>" not in new_special_tokens else None
output += (
fr"<s_{k}>"
+ json2token(obj[k], update_special_tokens_for_json_key, sort_json_key)
+ fr"</s_{k}>"
)
return output
elif type(obj) == list:
return r"<sep/>".join(
[json2token(item, update_special_tokens_for_json_key, sort_json_key) for item in obj]
)
else:
# excluded special tokens for now
obj = str(obj)
if f"<{obj}/>" in new_special_tokens:
obj = f"<{obj}/>" # for categorical special tokens
return obj
def preprocess_documents_for_donut(sample):
# create Donut-style input
text = json.loads(sample["text"])
d_doc = task_start_token + json2token(text) + eos_token
# convert all images to RGB
image = sample["image"].convert('RGB')
return {"image": image, "text": d_doc}
proc_dataset = dataset.map(preprocess_documents_for_donut)
print(f"Sample: {proc_dataset[45]['text']}")
print(f"New special tokens: {new_special_tokens + [task_start_token] + [eos_token]}")<jupyter_output><empty_output><jupyter_text>The next step is to tokenize our text and encode the images into tensors. Therefore we need to load `DonutProcessor`, add our new special tokens and adjust the size of the images when processing from `[1920, 2560]` to `[720, 960]` to use less memory and speed up training.<jupyter_code>from transformers import DonutProcessor
# Load processor
model_id = "naver-clova-ix/donut-base"
processor = DonutProcessor.from_pretrained(model_id)
# add new special tokens to tokenizer
processor.tokenizer.add_special_tokens({"additional_special_tokens": new_special_tokens + [task_start_token] + [eos_token]})
# we update some settings which differ from pretraining; namely the size of the images + no rotation required
# resizing the image to smaller sizes from [1920, 2560] to [720, 960]
processor.feature_extractor.size = [720,960] # should be (width, height)
processor.feature_extractor.do_align_long_axis = False<jupyter_output><empty_output><jupyter_text>Now, we can prepare our dataset, which we will use for the training later.<jupyter_code>def transform_and_tokenize(sample, processor=processor, split="train", max_length=512, ignore_id=-100):
# create tensor from image
try:
pixel_values = processor(
sample["image"], random_padding=split == "train", return_tensors="pt"
).pixel_values.squeeze()
except Exception as e:
print(sample)
print(f"Error: {e}")
return {}
# tokenize document
input_ids = processor.tokenizer(
sample["text"],
add_special_tokens=False,
max_length=max_length,
padding="max_length",
truncation=True,
return_tensors="pt",
)["input_ids"].squeeze(0)
labels = input_ids.clone()
labels[labels == processor.tokenizer.pad_token_id] = ignore_id # model doesn't need to predict pad token
return {"pixel_values": pixel_values, "labels": labels, "target_sequence": sample["text"]}
# need at least 32-64GB of RAM to run this
processed_dataset = proc_dataset.map(transform_and_tokenize,remove_columns=["image","text"])<jupyter_output><empty_output><jupyter_text>Before we can upload our dataset to S3 for training we want to split the dataset into train and test sets.<jupyter_code>processed_dataset = processed_dataset.train_test_split(test_size=0.1)
print(processed_dataset)<jupyter_output><empty_output><jupyter_text>After that is done we use the new [FileSystem integration](https://huggingface.co/docs/datasets/filesystems) to upload our dataset to S3. We are using the `sess.default_bucket()`, adjust this if you want to store the dataset in a different S3 bucket. We will use the S3 path later in our training script.<jupyter_code># save train_dataset to s3
training_input_path = f's3://{sess.default_bucket()}/processed/donut-sagemaker/train'
processed_dataset["train"].save_to_disk(training_input_path)
# save train_dataset to s3
test_input_path = f's3://{sess.default_bucket()}/processed/donut-sagemaker/test'
processed_dataset["test"].save_to_disk(test_input_path)
print("uploaded data to:")
print(f"training dataset to: {training_input_path}")
print(f"test dataset to: {test_input_path}")<jupyter_output><empty_output><jupyter_text>4. Fine-tune and evaluate Donut model on Amazon SageMakerAfter we have processed our dataset, we can start training our model using an Amazon SageMaker training job with the `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks and manages the infrastructure used. SageMaker takes care of starting and managing all the required EC2 instances for us, provides the correct Hugging Face container, uploads the provided scripts and downloads the data from our S3 bucket into the container at `/opt/ml/input/data`, and then starts the training job._An important step to keep in mind is that we extended the `DonutProcessor` earlier and added special tokens, which we need to pass through to our training script. We also need to pass the `image_size` and `max_length` to our training script._ As pretrained model we will use [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base). The `donut-base` includes only the pre-trained weights and was introduced in the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewok et al. and first released in [this repository](https://github.com/clovaai/donut).In addition to loading our model, we are resizing the `embedding` layer to match newly added tokens and adjusting the `image_size` of our encoder to match our dataset. We are also adding tokens for inference later.<jupyter_code>import time
import json
from sagemaker.huggingface import HuggingFace
# define Training Job Name
job_name = f'huggingface-donut-{time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())}'
# stringify special tokens
special_tokens = ",".join(processor.tokenizer.special_tokens_map_extended["additional_special_tokens"])
# hyperparameters, which are passed into the training job
hyperparameters = {
'model_id': model_id, # pre-trained model
'special_tokens': json.dumps(special_tokens), # special tokens which will be added to the tokenizer
'dataset_path': '/opt/ml/input/data/training', # path where sagemaker will save training dataset
'epochs': 3, # number of training epochs
'per_device_train_batch_size': 8, # batch size for training
    'gradient_checkpointing': True,                      # enable gradient checkpointing to save memory
'lr': 4e-5, # learning rate used during training
}
# create the Estimator
huggingface_estimator = HuggingFace(
entry_point = 'train.py', # train script
source_dir = 'scripts', # directory which includes all the files needed for training
instance_type = 'ml.g5.2xlarge', # instances type used for the training job
instance_count = 1, # the number of instances used for training
base_job_name = job_name, # the name of the training job
    role                 = role,              # IAM role used in training job to access AWS resources, e.g. S3
volume_size = 100, # the size of the EBS volume in GB
transformers_version = '4.26', # the transformers version used in the training job
pytorch_version = '1.13', # the pytorch_version version used in the training job
py_version = 'py39', # the python version used in the training job
hyperparameters = hyperparameters
)<jupyter_output><empty_output><jupyter_text>Let's start the training job and wait until it is finished. This will take around 30 minutes.<jupyter_code># define a data input dictionary with our uploaded s3 uris
data = {'training': training_input_path}
# starting the train job with our uploaded datasets as input
huggingface_estimator.fit(data, wait=True)<jupyter_output><empty_output><jupyter_text>5. Deploy Donut model on Amazon SageMakerDuring the training we copied an `inference.py` into our `model.tar.gz` which allows us now to easily deploy our model to SageMaker for inference. The [inference.py](./scripts/inference.py) implements a custom `model_fn` and `predict_fn` for our Donut model. The `model_fn` loads the model and processor and the `predict_fn` tokenizes the input and returns the prediction.<jupyter_code>from sagemaker.huggingface import HuggingFaceModel
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
model_data=huggingface_estimator.model_data,
# model_data="s3://sagemaker-us-east-1-558105141721/huggingface-donut-2023-05-18-15-15-20-2023-05-18-15-15-20-285/output/model.tar.gz"
role=role,
transformers_version="4.26",
pytorch_version="1.13",
py_version="py39",
model_server_workers=1
)<jupyter_output><empty_output><jupyter_text>Before we can deploy the model with the `HuggingFaceModel` class, we need to create a new serializer which supports our image data. Serializers are used in the Predictor's `predict` method to serialize our data to a specific `mime-type`. The default serializer for the `HuggingFacePredictor` is a `JSON` serializer, but since we are not going to send text data to the endpoint, we will use the DataSerializer.<jupyter_code>from sagemaker.serializers import DataSerializer
# create a serializer for the data
image_serializer = DataSerializer(content_type='image/x-image') # using x-image to support multiple image formats
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1,
instance_type= "ml.g5.2xlarge",
serializer=image_serializer, # serializer for our image files.
)<jupyter_output><empty_output><jupyter_text>SageMaker starts the deployment process by creating a SageMaker Endpoint Configuration and a SageMaker Endpoint. The Endpoint Configuration defines the model and the instance type.Let's test it by using an example from the `test` split.<jupyter_code>from PIL import Image
import io
from random import randrange
from transformers.image_transforms import to_pil_image
import numpy as np
test_sample = processed_dataset["test"][randrange(0,len(processed_dataset["test"]))]
image = to_pil_image(np.array(test_sample["pixel_values"]))
def image_to_byte_array(image: Image) -> bytes:
format = image.format if image.format else 'JPEG'
# BytesIO is a file-like buffer stored in memory
img_byte_arr = io.BytesIO()
    # image.save expects a file-like as an argument
image.save(img_byte_arr, format=format)
# Turn the BytesIO object back into a bytes object
return img_byte_arr.getvalue()
res = predictor.predict(data=image_to_byte_array(image))
target = processor.token2json(test_sample["target_sequence"])
print(f"Reference:\n {target}")
print(f"Prediction:\n {res}")
image.resize((350,600))<jupyter_output>Reference:
{'total': '41.87', 'date': '24/10/2017', 'company': 'GARDENIA BAKERIES (KL) SDN BHD', 'address': 'LOT 3, JALAN PELABUR 23/1, 40300 SHAH ALAM, SELANGOR.'}
Prediction:
{'total': '41.87', 'date': '24/10/2017', 'company': 'GARDENIA BAKERIES (KL) SDN BHD', 'address': 'LOT 3, JALAN PELABUR 23/1, 40300 SHAH ALAM, SELANGOR.'}<jupyter_text>Awesome!! Our fine-tuned model parsed the document correctly and extracted the right values. The next step is to evaluate our model on the test set. Since the model itself is a seq2seq model, it is not that straightforward to evaluate. To keep things simple, we will use [ROUGE](https://huggingface.co/spaces/evaluate-metric/rouge), short for Recall-Oriented Understudy for Gisting Evaluation. This metric does not behave like the standard accuracy: it will compare a generated text against a set of reference texts. The ROUGE score is mostly used for summarization or machine translation tasks. The higher the score the closer the generated text is to the reference text.<jupyter_code>!pip install rouge-score py7zr evaluate
import evaluate
from tqdm import tqdm
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false" # disable parallelism in tokenizers library
# Metric
rogue = evaluate.load("rouge")
predictions, references = [], []
# iterate over dataset
for sample in tqdm(processed_dataset["test"],total=len(processed_dataset["test"])):
image = to_pil_image(np.array(sample["pixel_values"]))
prediction = predictor.predict(data=image_to_byte_array(image))
reference = processor.token2json(sample["target_sequence"])
predictions.append(json.dumps(prediction))
references.append(json.dumps(reference))
# compute scores
results = rogue.compute(predictions=predictions,references=references)
print(results)
# {'rouge1': 0.8173891303548311, 'rouge2': 0.7266157117328251, 'rougeL': 0.8167726537736875, 'rougeLsum': 0.8144718562842747}<jupyter_output><empty_output><jupyter_text>Our model achieves a ROUGE-1 score of `81.7%` on the test set. ROUGE-1 refers to the overlap of unigrams (each word) between the prediction and reference._Note: The evaluation we did was very simple._In an inference test the model predicted for the `address` the value `NO. 31G&33G, JALAN SETIA INDAH X ,U13/X 40170 SETIA ALAM` and the ground truth was `'NO. 31G&33G, JALAN SETIA INDAH X,U13/X 40170 SETIA ALAM'`, where the only difference is the ` ` whitespace in between `X` and `,U13/X`.<jupyter_code>predictor.delete_model()
predictor.delete_endpoint()<jupyter_output><empty_output> | notebooks/sagemaker/26_document_ai_donut/sagemaker-notebook.ipynb/0 | {
"file_path": "notebooks/sagemaker/26_document_ai_donut/sagemaker-notebook.ipynb",
"repo_id": "notebooks",
"token_count": 7780
} | 154 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Troubleshooting
If you encounter any issue when using PEFT, please check the following list of common issues and their solutions.
## Examples don't work
Examples often rely on the most recent package versions, so please ensure they're up-to-date. In particular, check the following package versions:
- `peft`
- `transformers`
- `accelerate`
- `torch`
In general, you can update the package version by running this command inside your Python environment:
```bash
python -m pip install -U <package_name>
```
Installing PEFT from source is useful for keeping up with the latest developments:
```bash
python -m pip install git+https://github.com/huggingface/peft
```
## ValueError: Attempting to unscale FP16 gradients
This error probably occurred because the model was loaded with `torch_dtype=torch.float16` and then used in an automatic mixed precision (AMP) context, e.g. by setting `fp16=True` in the [`~transformers.Trainer`] class from 🤗 Transformers. The reason is that when using AMP, trainable weights should never use fp16. To make this work without loading the whole model in fp32, add the following to your code:
```python
peft_model = get_peft_model(...)
# add this:
for param in peft_model.parameters():
if param.requires_grad:
param.data = param.data.float()
# proceed as usual
trainer = Trainer(model=peft_model, fp16=True, ...)
trainer.train()
```
Alternatively, you can use the [`~utils.cast_mixed_precision_params`] function to correctly cast the weights:
```python
from peft import cast_mixed_precision_params
peft_model = get_peft_model(...)
cast_mixed_precision_params(peft_model, dtype=torch.float16)
# proceed as usual
trainer = Trainer(model=peft_model, fp16=True, ...)
trainer.train()
```
## Bad results from a loaded PEFT model
There can be several reasons for getting a poor result from a loaded PEFT model which are listed below. If you're still unable to troubleshoot the problem, see if anyone else had a similar [issue](https://github.com/huggingface/peft/issues) on GitHub, and if you can't find any, open a new issue.
When opening an issue, it helps a lot if you provide a minimal code example that reproduces the issue. Also, please report if the loaded model performs at the same level as the model did before fine-tuning, if it performs at a random level, or if it is only slightly worse than expected. This information helps us identify the problem more quickly.
### Random deviations
If your model outputs are not exactly the same as previous runs, there could be an issue with random elements. For example:
1. please ensure the model is in `.eval()` mode, which is important, for instance, if the model uses dropout
2. if you use [`~transformers.GenerationMixin.generate`] on a language model, there could be random sampling, so obtaining the same result requires setting a random seed (see the sketch after this list)
3. if you used quantization and merged the weights, small deviations are expected due to rounding errors
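To illustrate the first two points, here is a minimal sketch (assuming `peft_model` and a tokenized `inputs` dict already exist, as in the earlier snippets) that pins down these sources of randomness before comparing runs:
```python
import torch
from transformers import set_seed
set_seed(0)  # fix the Python, NumPy and PyTorch RNGs so sampling is reproducible
peft_model.eval()  # disable dropout and other training-time randomness
with torch.no_grad():
    output_ids = peft_model.generate(**inputs, do_sample=True, max_new_tokens=20)
```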
### Incorrectly loaded model
Please ensure that you load the model correctly. A common error is trying to load a _trained_ model with [`get_peft_model`] which is incorrect. Instead, the loading code should look like this:
```python
from peft import PeftModel, PeftConfig
base_model = ... # to load the base model, use the same code as when you trained it
config = PeftConfig.from_pretrained(peft_model_id)
peft_model = PeftModel.from_pretrained(base_model, peft_model_id)
```
### Randomly initialized layers
For some tasks, it is important to correctly configure `modules_to_save` in the config to account for randomly initialized layers.
As an example, this is necessary if you use LoRA to fine-tune a language model for sequence classification because 🤗 Transformers adds a randomly initialized classification head on top of the model. If you do not add this layer to `modules_to_save`, the classification head won't be saved. The next time you load the model, you'll get a _different_ randomly initialized classification head, resulting in completely different results.
PEFT tries to correctly guess the `modules_to_save` if you provide the `task_type` argument in the config. This should work for transformers models that follow the standard naming scheme. It is always a good idea to double check though because we can't guarantee all models follow the naming scheme.
When you load a transformers model that has randomly initialized layers, you should see a warning along the lines of:
```
Some weights of <MODEL> were not initialized from the model checkpoint at <ID> and are newly initialized: [<LAYER_NAMES>].
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
The mentioned layers should be added to `modules_to_save` in the config to avoid the described problem.
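As an illustration, a LoRA config for sequence classification could look like the sketch below. The module names are assumptions: `classifier` is the head name used by many Transformers models (e.g. BERT or RoBERTa) and `query`/`value` are their attention projection names, so double-check them for your architecture:
```python
from peft import LoraConfig, TaskType
config = LoraConfig(
    task_type=TaskType.SEQ_CLS,  # helps PEFT guess modules_to_save for standard models
    r=8,
    lora_alpha=16,
    target_modules=["query", "value"],  # attention projections to adapt (model-dependent)
    modules_to_save=["classifier"],  # also train and save the randomly initialized head
)
```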
### Extending the vocabulary
For many language fine-tuning tasks, extending the model's vocabulary is necessary since new tokens are being introduced. This requires extending the embedding layer to account for the new tokens and also storing the embedding layer in addition to the adapter weights when saving the adapter.
Save the embedding layer by adding it to the `target_modules` of the config. The embedding layer name must follow the standard naming scheme from Transformers. For example, the Mistral config could look like this:
```python
config = LoraConfig(..., target_modules=["embed_tokens", "lm_head", "q_proj", "v_proj"])
```
Once added to `target_modules`, PEFT automatically stores the embedding layer when saving the adapter if the model has the [`~transformers.PreTrainedModel.get_input_embeddings`] and [`~transformers.PreTrainedModel.get_output_embeddings`] methods. This is generally the case for Transformers models.
If the model's embedding layer doesn't follow the Transformers naming scheme, you can still save it by manually passing `save_embedding_layers=True` when saving the adapter:
```python
model = get_peft_model(...)
# train the model
model.save_pretrained("my_adapter", save_embedding_layers=True)
```
For inference, load the base model first and resize it the same way you did before you trained the model. After you've resized the base model, you can load the PEFT checkpoint.
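A minimal sketch of that order of operations (the model and adapter identifiers below are placeholders) could look like this:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
peft_model_id = "path/to/my_adapter"  # hypothetical adapter checkpoint saved together with the embedding layer
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)  # tokenizer that already contains the added tokens
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
base_model.resize_token_embeddings(len(tokenizer))  # resize before loading the adapter
peft_model = PeftModel.from_pretrained(base_model, peft_model_id)
```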
For a complete example, please check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb).
| peft/docs/source/developer_guides/troubleshooting.md/0 | {
"file_path": "peft/docs/source/developer_guides/troubleshooting.md",
"repo_id": "peft",
"token_count": 1890
} | 155 |
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Models
[`PeftModel`] is the base model class for specifying the base Transformer model and configuration to apply a PEFT method to. The base `PeftModel` contains methods for loading and saving models from the Hub.
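As a quick illustration, a typical round trip might look like the following sketch (the base model and adapter identifiers are placeholders):
```python
from transformers import AutoModelForCausalLM
from peft import PeftModel
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
peft_model = PeftModel.from_pretrained(base_model, "path/to/adapter")  # local path or Hub repo id
peft_model.save_pretrained("my-local-adapter")  # writes the adapter weights and config
```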
## PeftModel
[[autodoc]] PeftModel
- all
## PeftModelForSequenceClassification
A `PeftModel` for sequence classification tasks.
[[autodoc]] PeftModelForSequenceClassification
- all
## PeftModelForTokenClassification
A `PeftModel` for token classification tasks.
[[autodoc]] PeftModelForTokenClassification
- all
## PeftModelForCausalLM
A `PeftModel` for causal language modeling.
[[autodoc]] PeftModelForCausalLM
- all
## PeftModelForSeq2SeqLM
A `PeftModel` for sequence-to-sequence language modeling.
[[autodoc]] PeftModelForSeq2SeqLM
- all
## PeftModelForQuestionAnswering
A `PeftModel` for question answering.
[[autodoc]] PeftModelForQuestionAnswering
- all
## PeftModelForFeatureExtraction
A `PeftModel` for extracting features/embeddings from transformer models.
[[autodoc]] PeftModelForFeatureExtraction
- all
## PeftMixedModel
A `PeftModel` for mixing different adapter types (e.g. LoRA and LoHa).
[[autodoc]] PeftMixedModel
- all
## Utilities
[[autodoc]] utils.cast_mixed_precision_params
[[autodoc]] get_peft_model
[[autodoc]] inject_adapter_in_model
[[autodoc]] utils.get_peft_model_state_dict
[[autodoc]] utils.prepare_model_for_kbit_training
| peft/docs/source/package_reference/peft_model.md/0 | {
"file_path": "peft/docs/source/package_reference/peft_model.md",
"repo_id": "peft",
"token_count": 540
} | 156 |
<jupyter_start><jupyter_code>from transformers import AutoModelForCausalLM
from peft import get_peft_config, get_peft_model, PrefixTuningConfig, TaskType, PeftType
import torch
from datasets import load_dataset
import os
from transformers import AutoTokenizer
from torch.utils.data import DataLoader
from transformers import default_data_collator, get_linear_schedule_with_warmup
from tqdm import tqdm
from datasets import load_dataset
device = "cuda"
model_name_or_path = "bigscience/bloomz-560m"
tokenizer_name_or_path = "bigscience/bloomz-560m"
peft_config = PrefixTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=30)
dataset_name = "twitter_complaints"
checkpoint_name = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}_v1.pt".replace(
"/", "_"
)
text_column = "Tweet text"
label_column = "text_label"
max_length = 64
lr = 3e-2
num_epochs = 50
batch_size = 8
from datasets import load_dataset
dataset = load_dataset("ought/raft", dataset_name)
classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
print(classes)
dataset = dataset.map(
lambda x: {"text_label": [classes[label] for label in x["Label"]]},
batched=True,
num_proc=1,
)
print(dataset)
dataset["train"][0]
# data preprocessing
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
print(target_max_length)
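# Note on the preprocessing below: each example is built for causal LM training by concatenating the
# tokenized prompt with the tokenized label (plus an EOS token), masking the prompt positions in the
# labels with -100 so that only the label tokens contribute to the loss, and left-padding everything
# to `max_length` so every example in the batch has the same fixed shape.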
def preprocess_function(examples):
batch_size = len(examples[text_column])
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
targets = [str(x) for x in examples[label_column]]
model_inputs = tokenizer(inputs)
labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id]
# print(i, sample_input_ids, label_input_ids)
model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
# print(model_inputs)
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i]
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
max_length - len(sample_input_ids)
) + sample_input_ids
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
"attention_mask"
][i]
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["train"]
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
def test_preprocess_function(examples):
batch_size = len(examples[text_column])
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
model_inputs = tokenizer(inputs)
# print(model_inputs)
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
max_length - len(sample_input_ids)
) + sample_input_ids
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
"attention_mask"
][i]
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
return model_inputs
test_dataset = dataset["test"].map(
test_preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
next(iter(test_dataloader))
next(iter(train_dataloader))
len(test_dataloader)
next(iter(test_dataloader))
# creating model
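# Prefix tuning keeps the base model frozen and learns `num_virtual_tokens` (30 here) virtual prefix
# tokens whose key/value states are prepended to the attention of every layer; only these prefix
# parameters are updated during training.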
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model.print_trainable_parameters()
model
model.peft_config
# model
# optimizer and lr scheduler
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# training and evaluation
model = model.to(device)
for epoch in range(num_epochs):
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
batch = {k: v.to(device) for k, v in batch.items()}
# print(batch)
# print(batch["input_ids"].shape)
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
eval_loss = 0
eval_preds = []
for step, batch in enumerate(tqdm(eval_dataloader)):
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
eval_loss += loss.detach().float()
eval_preds.extend(
tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
)
eval_epoch_loss = eval_loss / len(eval_dataloader)
eval_ppl = torch.exp(eval_epoch_loss)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
model.eval()
i = 16
inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
print(dataset["test"][i]["Tweet text"])
print(inputs)
with torch.no_grad():
inputs = {k: v.to(device) for k, v in inputs.items()}
outputs = model.generate(
input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3
)
print(outputs)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>Hey @nytimes your link to cancel my subscription isn't working and nobody is answering the chat. Please don't play that kind of stupid game.
{'input_ids': tensor([[227985, 5484, 915, 54078, 2566, 7782, 24502, 2632, 8989,
427, 36992, 2670, 140711, 21994, 10789, 530, 88399, 632,
183542, 368, 44799, 17, 29901, 5926, 7229, 861, 11596,
461, 78851, 14775, 17, 77658, 915, 210]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}
tensor([[227985, 5484, 915, 54078, 2566, 7782, 24502, 2632, 8989,
427, 36992, 2670, 140711, 21994, 10789, 530, 88399, 632,
183542, 368, 44799, 17, 29901, 5926, 7229, 861, 11596,
461, 78851, 14775, 17, 77658, 915, 210, 16449, 5952,
3]], device='cuda:0')
["Tweet text : Hey @nytimes your [...]<jupyter_text>You can push model to hub or save model locally. - Option1: Pushing the model to Hugging Face Hub```pythonmodel.push_to_hub( f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), token = "hf_...")```token (`bool` or `str`, *optional*): `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. Or you can get your token from https://huggingface.co/settings/token```- Or save model locally```pythonpeft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_")model.save_pretrained(peft_model_id)```<jupyter_code># saving model
peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace(
"/", "_"
)
model.save_pretrained(peft_model_id)
ckpt = f"{peft_model_id}/adapter_model.bin"
!du -h $ckpt
from peft import PeftModel, PeftConfig
peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace(
"/", "_"
)
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
model.to(device)
model.eval()
i = 4
inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
print(dataset["test"][i]["Tweet text"])
print(inputs)
with torch.no_grad():
inputs = {k: v.to(device) for k, v in inputs.items()}
outputs = model.generate(
input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3
)
print(outputs)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>@greateranglia Ok thanks...
{'input_ids': tensor([[227985, 5484, 915, 2566, 14173, 2960, 29906, 387, 20706,
49337, 1369, 77658, 915, 210]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}
tensor([[227985, 5484, 915, 2566, 14173, 2960, 29906, 387, 20706,
49337, 1369, 77658, 915, 210, 1936, 106863, 3]],
device='cuda:0')
['Tweet text : @greateranglia Ok thanks... Label : no complaint'] | peft/examples/causal_language_modeling/peft_prefix_tuning_clm.ipynb/0 | {
"file_path": "peft/examples/causal_language_modeling/peft_prefix_tuning_clm.ipynb",
"repo_id": "peft",
"token_count": 4714
} | 157 |
<jupyter_start><jupyter_code>import argparse
import json
import logging
import math
import os
import random
from pathlib import Path
from tqdm import tqdm
import datasets
from datasets import load_dataset, DatasetDict
import evaluate
import torch
from torch import nn
from torch.utils.data import DataLoader
import transformers
from transformers import AutoTokenizer, AutoModel, default_data_collator, SchedulerType, get_scheduler
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version
from huggingface_hub import Repository, create_repo
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from peft import PeftModel
import hnswlib
class AutoModelForSentenceEmbedding(nn.Module):
def __init__(self, model_name, tokenizer, normalize=True):
super(AutoModelForSentenceEmbedding, self).__init__()
        self.model = AutoModel.from_pretrained(model_name) # , quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map={"":0})
self.normalize = normalize
self.tokenizer = tokenizer
def forward(self, **kwargs):
model_output = self.model(**kwargs)
embeddings = self.mean_pooling(model_output, kwargs["attention_mask"])
if self.normalize:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
return embeddings
def mean_pooling(self, model_output, attention_mask):
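        # Mean-pool the token embeddings, using the attention mask so that padding positions are ignored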
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
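# Since the embeddings are L2-normalized in forward() (normalize=True), this row-wise dot product equals the cosine similarity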
def get_cosing_embeddings(query_embs, product_embs):
return torch.sum(query_embs * product_embs, axis=1)
model_name_or_path = "intfloat/e5-large-v2"
peft_model_id = "smangrul/peft_lora_e5_semantic_search"
dataset_name = "smangrul/amazon_esci"
max_length = 70
batch_size = 256
import pandas as pd
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
dataset = load_dataset(dataset_name)
train_product_dataset = dataset["train"].to_pandas()[["product_title"]]
val_product_dataset = dataset["validation"].to_pandas()[["product_title"]]
product_dataset_for_indexing = pd.concat([train_product_dataset, val_product_dataset])
product_dataset_for_indexing = product_dataset_for_indexing.drop_duplicates()
product_dataset_for_indexing.reset_index(drop=True, inplace=True)
product_dataset_for_indexing.reset_index(inplace=True)
product_dataset_for_indexing
pd.set_option("max_colwidth", 300)
product_dataset_for_indexing.sample(10)
from datasets import Dataset
dataset = Dataset.from_pandas(product_dataset_for_indexing)
def preprocess_function(examples):
products = examples["product_title"]
result = tokenizer(products, padding="max_length", max_length=70, truncation=True)
return result
processed_dataset = dataset.map(
preprocess_function,
batched=True,
remove_columns=dataset.column_names,
desc="Running tokenizer on dataset",
)
processed_dataset
# base model
model = AutoModelForSentenceEmbedding(model_name_or_path, tokenizer)
# peft config and wrapping
model = PeftModel.from_pretrained(model, peft_model_id)
print(model)
dataloader = DataLoader(
processed_dataset,
shuffle=False,
collate_fn=default_data_collator,
batch_size=batch_size,
pin_memory=True,
)
next(iter(dataloader))
ids_to_products_dict = {i: p for i, p in zip(dataset["index"], dataset["product_title"])}
ids_to_products_dict
device = "cuda"
model.to(device)
model.eval()
model = model.merge_and_unload()
import numpy as np
num_products = len(dataset)
d = 1024
product_embeddings_array = np.zeros((num_products, d))
for step, batch in enumerate(tqdm(dataloader)):
with torch.no_grad():
with torch.amp.autocast(dtype=torch.bfloat16, device_type="cuda"):
product_embs = model(**{k: v.to(device) for k, v in batch.items()}).detach().float().cpu()
start_index = step * batch_size
end_index = start_index + batch_size if (start_index + batch_size) < num_products else num_products
product_embeddings_array[start_index:end_index] = product_embs
del product_embs, batch
def construct_search_index(dim, num_elements, data):
# Declaring index
search_index = hnswlib.Index(space="ip", dim=dim) # possible options are l2, cosine or ip
# Initializing index - the maximum number of elements should be known beforehand
search_index.init_index(max_elements=num_elements, ef_construction=200, M=100)
# Element insertion (can be called several times):
ids = np.arange(num_elements)
search_index.add_items(data, ids)
return search_index
product_search_index = construct_search_index(d, num_products, product_embeddings_array)
def get_query_embeddings(query, model, tokenizer, device):
inputs = tokenizer(query, padding="max_length", max_length=70, truncation=True, return_tensors="pt")
model.eval()
with torch.no_grad():
query_embs = model(**{k: v.to(device) for k, v in inputs.items()}).detach().cpu()
return query_embs[0]
def get_nearest_neighbours(k, search_index, query_embeddings, ids_to_products_dict, threshold=0.7):
# Controlling the recall by setting ef:
search_index.set_ef(100) # ef should always be > k
# Query dataset, k - number of the closest elements (returns 2 numpy arrays)
labels, distances = search_index.knn_query(query_embeddings, k=k)
return [
(ids_to_products_dict[label], (1 - distance))
for label, distance in zip(labels[0], distances[0])
if (1 - distance) >= threshold
]
query = "NLP and ML books"
k = 10
query_embeddings = get_query_embeddings(query, model, tokenizer, device)
search_results = get_nearest_neighbours(k, product_search_index, query_embeddings, ids_to_products_dict, threshold=0.7)
print(f"{query=}")
for product, cosine_sim_score in search_results:
print(f"cosine_sim_score={round(cosine_sim_score,2)} {product=}")<jupyter_output>query='NLP and ML books'
cosine_sim_score=0.92 product='Machine Learning: A Journey from Beginner to Advanced Including Deep Learning, Scikit-learn and Tensorflow'
cosine_sim_score=0.91 product='Mastering Machine Learning with scikit-learn'
cosine_sim_score=0.91 product='Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems'
cosine_sim_score=0.91 product='Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems'
cosine_sim_score=0.91 product='Practical Deep Learning: A Python-Based Introduction'
cosine_sim_score=0.9 product='Machine Learning: A Hands-On, Project-Based Introduction to Machine Learning for Absolute Beginners: Mastering Engineering ML Systems using Scikit-Learn and TensorFlow'
cosine_sim_score=0.9 product='Mastering Machine Learning with scikit-learn - Second Edition: Apply effective learning algorithms to real-world problems using sci[...] | peft/examples/feature_extraction/peft_lora_embedding_semantic_similarity_inference.ipynb/0 | {
"file_path": "peft/examples/feature_extraction/peft_lora_embedding_semantic_similarity_inference.ipynb",
"repo_id": "peft",
"token_count": 2675
} | 158 |
<jupyter_start><jupyter_code>!git clone https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth
%cd "peft-lora-sd-dreambooth"
!pip install -r requirements.txt
!python colab.py<jupyter_output><empty_output> | peft/examples/lora_dreambooth/colab_notebook.ipynb/0 | {
"file_path": "peft/examples/lora_dreambooth/colab_notebook.ipynb",
"repo_id": "peft",
"token_count": 91
} | 159 |
<jupyter_start><jupyter_code>import argparse
import os
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
import peft
import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from tqdm import tqdm
batch_size = 8
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = peft.PeftType.IA3
device = "cuda"
num_epochs = 12
# peft_config = LoraConfig(task_type="SEQ_CLS", inference_mode=False, r=8, lora_alpha=16, lora_dropout=0.1)
peft_config = peft.IA3Config(task_type="SEQ_CLS", inference_mode=False)
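# Note: no target_modules are given here, so PEFT falls back to its built-in per-architecture
# defaults for IA3 when the model is wrapped below; you could also set target_modules and
# feedforward_modules explicitly in IA3Config to override that behavior.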
lr = 1e-3
if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
padding_side = "left"
else:
padding_side = "right"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels', which is the column name expected by models
# in the transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)
test_dataloader = DataLoader(tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = peft.get_peft_model(model, peft_config)
model.print_trainable_parameters()
model
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
num_training_steps=(len(train_dataloader) * num_epochs),
)
model.to(device)
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(tqdm(train_dataloader)):
batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
batch.to(device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = predictions, batch["labels"]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/459 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
100%|██████████| 459/459 [01:41<00:00, 4.52it/s]
100%|██████████| 51/51 [00:05<00:00, 8.89it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>model.push_to_hub("SumanthRH/roberta-large-peft-ia3", use_auth_token=True)<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
peft_model_id = "SumanthRH/roberta-large-peft-ia3"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
# Load the Lora model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)
inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
batch.to(device)
with torch.no_grad():
outputs = inference_model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = predictions, batch["labels"]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
print(eval_metric)<jupyter_output><empty_output> | peft/examples/sequence_classification/IA3.ipynb/0 | {
"file_path": "peft/examples/sequence_classification/IA3.ipynb",
"repo_id": "peft",
"token_count": 1903
} | 160 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
VERSION = "0.9.1.dev0"
extras = {}
extras["quality"] = [
"black", # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
"hf-doc-builder",
"ruff~=0.2.1",
]
extras["docs_specific"] = [
"black", # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
"hf-doc-builder",
]
extras["dev"] = extras["quality"] + extras["docs_specific"]
extras["test"] = extras["dev"] + [
"pytest",
"pytest-cov",
"pytest-xdist",
"parameterized",
"datasets",
"diffusers<0.21.0",
"scipy",
]
setup(
name="peft",
version=VERSION,
description="Parameter-Efficient Fine-Tuning (PEFT)",
license_files=["LICENSE"],
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="deep learning",
license="Apache",
author="The HuggingFace team",
author_email="[email protected]",
url="https://github.com/huggingface/peft",
package_dir={"": "src"},
packages=find_packages("src"),
package_data={"peft": ["py.typed"]},
entry_points={},
python_requires=">=3.8.0",
install_requires=[
"numpy>=1.17",
"packaging>=20.0",
"psutil",
"pyyaml",
"torch>=1.13.0",
"transformers",
"tqdm",
"accelerate>=0.21.0",
"safetensors",
"huggingface_hub>=0.17.0",
],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
# Release checklist
# 1. Change the version in __init__.py and setup.py to the release version, e.g. from "0.6.0.dev0" to "0.6.0"
# 2. Check if there are any deprecations that need to be addressed for this release by searching for "# TODO" in the code
# 3. Commit these changes with the message: "Release: VERSION", create a PR and merge it.
# 4. Add a tag in git to mark the release: "git tag -a VERSION -m 'Adds tag VERSION for pypi' "
# Push the tag to git:
# git push --tags origin main
# It is necessary to work on the original repository, not on a fork.
# 5. Run the following commands in the top-level directory:
# python setup.py bdist_wheel
# python setup.py sdist
# Ensure that you are on the clean and up-to-date main branch (git status --untracked-files=no should not list any
# files and show the main branch)
# 6. Upload the package to the pypi test server first:
# twine upload dist/* -r pypitest
# 7. Check that you can install it in a virtualenv by running:
# pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple peft
# 8. Upload the final version to actual pypi:
# twine upload dist/* -r pypi
# 9. Add release notes to the tag on https://github.com/huggingface/peft/releases once everything is looking hunky-dory.
# Check the notes here: https://docs.google.com/document/d/1k-sOIfykuKjWcOIALqjhFKz4amFEp-myeJUJEzNgjoU/edit?usp=sharing
# 10. Update the version in __init__.py, setup.py to the bumped minor version + ".dev0" (e.g. from "0.6.0" to "0.7.0.dev0")
| peft/setup.py/0 | {
"file_path": "peft/setup.py",
"repo_id": "peft",
"token_count": 1546
} | 161 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
from transformers.pytorch_utils import Conv1D
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.lora import LoraConfig, LoraModel
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
_freeze_adapter,
_get_submodules,
get_auto_gptq_quant_linear,
get_quantization_config,
)
from .gptq import SVDQuantLinear
from .layer import AdaLoraLayer, RankAllocator, SVDLinear
class AdaLoraModel(LoraModel):
"""
Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
https://openreview.net/forum?id=lq62uWRJjiY
Args:
model ([`transformers.PreTrainedModel`]): The model to be adapted.
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The AdaLora model.
Example::
>>> from transformers import AutoModelForSeq2SeqLM
>>> from peft import AdaLoraConfig, AdaLoraModel
>>> config = AdaLoraConfig(
...     peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
...     lora_dropout=0.01,
... )
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> model = AdaLoraModel(model, config, "default")
**Attributes**:
- **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
"""
# Note: don't redefine prefix here, it should be inherited from LoraModel
def __init__(self, model, config, adapter_name):
super().__init__(model, config, adapter_name)
trainable_mode_counter = 0
for config in self.peft_config.values():
if not config.inference_mode:
trainable_mode_counter += 1
if trainable_mode_counter > 1:
raise ValueError(
"AdaLoraModel supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
)
if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
else:
self.trainable_adapter_name = adapter_name
self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
def _check_new_adapter_config(self, config: LoraConfig) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
super()._check_new_adapter_config(config)
trainable_mode_counter = 0
for config_ in self.peft_config.values():
if not config_.inference_mode:
trainable_mode_counter += 1
if trainable_mode_counter > 1:
raise ValueError(
f"{self.__class__.__name__} supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one "
"you want to train."
)
def _create_and_replace(
self,
lora_config,
adapter_name,
target,
target_name,
parent,
current_key,
):
kwargs = {
"r": lora_config.init_r,
"lora_alpha": lora_config.lora_alpha,
"lora_dropout": lora_config.lora_dropout,
"fan_in_fan_out": lora_config.fan_in_fan_out,
"init_lora_weights": lora_config.init_lora_weights,
"loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
"loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
}
if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available():
raise ImportError(
"To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. "
"You can install it with `pip install bitsandbytes`."
)
quantization_config = get_quantization_config(self.model, method="gptq")
if quantization_config is not None:
kwargs["gptq_quantization_config"] = quantization_config
# If it is not an AdaLoraLayer, create a new module, else update it with new adapters
if not isinstance(target, AdaLoraLayer):
new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
if adapter_name != self.active_adapter:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(
adapter_name,
lora_config.init_r,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
@staticmethod
def _create_new_module(lora_config, adapter_name, target, **kwargs):
# avoid eager bnb import
if is_bnb_available():
import bitsandbytes as bnb
from .bnb import SVDLinear8bitLt
if is_bnb_4bit_available():
from .bnb import SVDLinear4bit
gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
kwargs.update(
{
"has_fp16_weights": target_base_layer.state.has_fp16_weights,
"memory_efficient_backward": target_base_layer.state.memory_efficient_backward,
"threshold": target_base_layer.state.threshold,
"index": target_base_layer.index,
}
)
new_module = SVDLinear8bitLt(target, adapter_name, **kwargs)
elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update(
{
"compute_dtype": target_base_layer.compute_dtype,
"compress_statistics": target_base_layer.weight.compress_statistics,
"quant_type": target_base_layer.weight.quant_type,
}
)
new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs)
elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear):
new_module = SVDQuantLinear(target, adapter_name, **kwargs)
else:
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. "
"Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
)
new_module = SVDLinear(target, adapter_name, **kwargs)
return new_module
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
model_config["model_type"]
]
return peft_config
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor):
# Calculate the orthogonal regularization
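# Concretely: for every lora_A / lora_B matrix P of the trainable adapter, this accumulates
# ||P P^T - I||_F (or ||P^T P - I||_F for lora_B), averages over the matrices, and adds
# orth_reg_weight * regu_loss to the task loss, encouraging near-orthogonal factors.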
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
if orth_reg_weight <= 0:
raise ValueError("orth_reg_weight should be greater than 0. ")
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
if num_param > 0:
regu_loss = regu_loss / num_param
else:
regu_loss = 0
outputs.loss += orth_reg_weight * regu_loss
return outputs
def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
lora_config = self.peft_config[adapter_name]
for name, rank_idx in rank_pattern.items():
if isinstance(rank_idx, list):
rank = sum(rank_idx)
elif isinstance(rank_idx, torch.Tensor):
rank_idx = rank_idx.view(-1)
rank = rank_idx.sum().item()
else:
raise ValueError("Unexpected type of rank_idx")
key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
_, target, _ = _get_submodules(self.model, key)
lora_E_weights = target.lora_E[adapter_name][rank_idx]
lora_A_weights = target.lora_A[adapter_name][rank_idx]
lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
ranknum = target.ranknum[adapter_name]
target.update_layer(
adapter_name,
rank,
lora_config.lora_alpha,
lora_config.lora_dropout,
lora_config.init_lora_weights,
)
with torch.no_grad():
if rank > 0:
target.lora_E[adapter_name].copy_(lora_E_weights)
target.lora_A[adapter_name].copy_(lora_A_weights)
target.lora_B[adapter_name].copy_(lora_B_weights)
# The scaling is exactly as the previous
target.ranknum[adapter_name].copy_(ranknum)
def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
for name, rank_idx in rank_pattern.items():
rank = sum(rank_idx)
prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
for layer in ["lora_E", "lora_A", "lora_B"]:
key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
if layer != "lora_B":
state_dict[key] = (
state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
)
else:
state_dict[key] = (
state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
)
return state_dict
def update_and_allocate(self, global_step):
"""
This method updates Adalora budget and mask.
This should be called in every training step after `loss.backward()` and before `zero_grad()`.
`tinit`, `tfinal` and `deltaT` are handled within the method.
Args:
global_step (`int`): The current training step, it is used to calculate adalora budget.
Example:
```python
>>> loss = model(**input).loss
>>> loss.backward()
>>> optimizer.step()
>>> model.base_model.update_and_allocate(i_step)
>>> optimizer.zero_grad()
```
"""
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
# for some reason, this freezes the trainable parameters and nothing gets updated
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using an inefficient way to mask the unimportant weights using the rank pattern,
# due to the problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# The branches above are exhaustive for integer global_step; nothing to do otherwise
else:
return None
| peft/src/peft/tuners/adalora/model.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/model.py",
"repo_id": "peft",
"token_count": 7189
} | 162 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.tuners.prompt_tuning import PromptTuningConfig
from peft.utils import PeftType
class MultitaskPromptTuningInit(str, enum.Enum):
# initialize prompt with text
TEXT = "TEXT"
# initialize prompt with random matrix
RANDOM = "RANDOM"
# average the prefix and column matrices obtained during source training
AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
# pick prefix and column matrices for a particular task obtained during source training
EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
# only use the prompt embeddings trained during source training
ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
@dataclass
class MultitaskPromptTuningConfig(PromptTuningConfig):
prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
default=MultitaskPromptTuningInit.RANDOM,
metadata={
"help": (
"How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
"EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
),
},
)
prompt_tuning_init_state_dict_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The path of source state dict. This is required when training the downstream target prompt from "
"the pretrained source prompt"
),
},
)
prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
num_ranks: Optional[int] = field(default=1, metadata={"help": "number of ranks"})
num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
def __post_init__(self):
self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
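# Illustrative usage (not part of this module; the path below is a placeholder):
# initialize a target-task prompt from a prompt previously trained on source tasks.
#
# from peft import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
#
# target_config = MultitaskPromptTuningConfig(
#     task_type="SEQ_2_SEQ_LM",
#     num_virtual_tokens=50,
#     num_tasks=1,
#     prompt_tuning_init=MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
#     prompt_tuning_init_state_dict_path="path/to/source/adapter_model.bin",  # placeholder
# )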
| peft/src/peft/tuners/multitask_prompt_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/multitask_prompt_tuning/config.py",
"repo_id": "peft",
"token_count": 883
} | 163 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py
# with some refactor
import torch
class PrefixEncoder(torch.nn.Module):
r"""
The `torch.nn` model to encode the prefix.
Args:
config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.
Example:
```py
>>> from peft import PrefixEncoder, PrefixTuningConfig
>>> config = PrefixTuningConfig(
... peft_type="PREFIX_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... encoder_hidden_size=768,
... )
>>> prefix_encoder = PrefixEncoder(config)
```
**Attributes**:
- **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
- **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
`prefix_projection` is `True`.
- **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.
Input shape: (`batch_size`, `num_virtual_tokens`)
Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
"""
def __init__(self, config):
super().__init__()
self.prefix_projection = config.prefix_projection
token_dim = config.token_dim
num_layers = config.num_layers
encoder_hidden_size = config.encoder_hidden_size
num_virtual_tokens = config.num_virtual_tokens
if self.prefix_projection and not config.inference_mode:
# Use a two-layer MLP to encode the prefix
self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
self.transform = torch.nn.Sequential(
torch.nn.Linear(token_dim, encoder_hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
)
else:
self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)
def forward(self, prefix: torch.Tensor):
if self.prefix_projection:
prefix_tokens = self.embedding(prefix)
past_key_values = self.transform(prefix_tokens)
else:
past_key_values = self.embedding(prefix)
return past_key_values
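# Illustrative shapes (not part of the module), using the config from the docstring example
# above (num_virtual_tokens=20, num_layers=12, token_dim=768):
# prefix = torch.arange(20).unsqueeze(0)          # (batch_size=1, num_virtual_tokens=20)
# past_key_values = PrefixEncoder(config)(prefix)
# past_key_values.shape -> (1, 20, 2 * 12 * 768), i.e. (batch_size, num_virtual_tokens, 2*layers*hidden)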
| peft/src/peft/tuners/prefix_tuning/model.py/0 | {
"file_path": "peft/src/peft/tuners/prefix_tuning/model.py",
"repo_id": "peft",
"token_count": 1228
} | 164 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import tempfile
import unittest
from unittest import TestCase
import pytest
import torch
from torch.testing import assert_close
from peft.mapping import get_peft_model
from peft.peft_model import PeftModel
from peft.tuners.adaption_prompt import AdaptionPromptConfig
from peft.utils.other import prepare_model_for_int8_training
from peft.utils.save_and_load import get_peft_model_state_dict
from tests.testing_common import PeftCommonTester
def is_llama_available() -> bool:
"""Check if Llama is available in the transformers library (it's not in earlier versions)."""
try:
return importlib.util.find_spec("transformers.models.llama.modeling_llama") is not None
except ModuleNotFoundError:
return False
def is_mistral_available() -> bool:
"""Check if mistral is available in the transformers library (it's not in earlier versions)."""
try:
return importlib.util.find_spec("transformers.models.mistral.modeling_mistral") is not None
except ModuleNotFoundError:
return False
if is_llama_available():
# We guard the import statement so that our unit tests will pass in CI environments
# that don't have a transformers package with Llama.
from transformers import LlamaConfig, LlamaForCausalLM, LlamaModel
if is_mistral_available():
# We guard the import statement so that our unit tests will pass in CI environments
# that don't have a transformers package with Mistral.
from transformers import MistralConfig, MistralForCausalLM, MistralModel
class AdaptionPromptTester(TestCase, PeftCommonTester):
"""
Tests for the AdaptionPrompt model.
Some of these tests were adapted from `test_peft_model.py` (which has been refactored since), but since we haven't
checked in the test checkpoints for Llama into `hf-internal-testing`, we separate them for now.
"""
def setUp(self):
# Check that llama is available in transformers package before running each test.
if not is_llama_available():
self.skipTest("Llama not available in transformers. Skipping all tests.")
else:
# Check for Mistral's availability. It might or might not be available.
self.mistral_available = is_mistral_available()
@staticmethod
def _create_test_llama_config():
"""Create a test config for a small Llama model for testing."""
return LlamaConfig(
vocab_size=16,
hidden_size=8,
intermediate_size=8,
num_hidden_layers=8,
num_attention_heads=4,
use_cache=False,
)
@staticmethod
def _create_test_mistral_config():
"""Create a test config for a small Mistral model for testing."""
return MistralConfig(
vocab_size=16,
hidden_size=8,
intermediate_size=8,
num_hidden_layers=8,
num_attention_heads=4,
num_key_value_heads=2,
use_cache=False,
)
def test_attributes(self) -> None:
model = LlamaModel(self._create_test_llama_config())
config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4)
model = get_peft_model(model, config)
assert hasattr(model, "save_pretrained")
assert hasattr(model, "from_pretrained")
assert hasattr(model, "push_to_hub")
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_attributes_mistral(self) -> None:
model_mistral = MistralModel(self._create_test_mistral_config())
config_mistral = AdaptionPromptConfig(adapter_layers=1, adapter_len=4)
model_mistral = get_peft_model(model_mistral, config_mistral)
assert hasattr(model_mistral, "save_pretrained")
assert hasattr(model_mistral, "from_pretrained")
assert hasattr(model_mistral, "push_to_hub")
def test_prepare_for_training(self) -> None:
# Test Llama
model = LlamaForCausalLM(self._create_test_llama_config())
config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config)
model = model.to(self.torch_device)
dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
dummy_output = model.get_input_embeddings()(dummy_input)
assert not dummy_output.requires_grad
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_prepare_for_training_mistral(self) -> None:
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
config_mistral = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
model_mistral = get_peft_model(model_mistral, config_mistral)
model_mistral = model_mistral.to(self.torch_device)
dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
dummy_output = model_mistral.get_input_embeddings()(dummy_input)
assert not dummy_output.requires_grad
def test_prepare_for_int8_training(self) -> None:
model = LlamaForCausalLM(self._create_test_llama_config())
model = prepare_model_for_int8_training(model)
model = model.to(self.torch_device)
for param in model.parameters():
assert not param.requires_grad
config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config)
# For backward compatibility
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
dummy_output = model.get_input_embeddings()(dummy_input)
assert dummy_output.requires_grad
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_prepare_model_for_int8_training_mistral(self) -> None:
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
model_mistral = prepare_model_for_int8_training(model_mistral)
model_mistral = model_mistral.to(self.torch_device)
for param in model_mistral.parameters():
assert not param.requires_grad
config_mistral = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
model_mistral = get_peft_model(model_mistral, config_mistral)
# For backward compatibility
if hasattr(model_mistral, "enable_input_require_grads"):
model_mistral.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model_mistral.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
dummy_output = model_mistral.get_input_embeddings()(dummy_input)
assert dummy_output.requires_grad
def test_save_pretrained_regression(self) -> None:
seed = 420
torch.manual_seed(seed)
model = LlamaForCausalLM(self._create_test_llama_config())
config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config)
model = model.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname, safe_serialization=False)
torch.manual_seed(seed)
model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config())
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
# check if the state dicts are equal
state_dict = get_peft_model_state_dict(model)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)
# check if same keys
assert state_dict.keys() == state_dict_from_pretrained.keys()
# Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
assert len(state_dict) == 4
# check if tensors equal
for key in state_dict.keys():
assert torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
# check if `adapter_model.bin` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))
# check if `adapter_config.json` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
# check if `model.safetensors` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
# check if `config.json` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_save_pretrained_regression_mistral(self) -> None:
seed = 420
torch.manual_seed(seed)
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model_mistral = get_peft_model(model_mistral, config_mistral)
model_mistral = model_mistral.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
model_mistral.save_pretrained(tmp_dirname, safe_serialization=False)
torch.manual_seed(seed)
model_from_pretrained_mistral = MistralForCausalLM(self._create_test_mistral_config())
model_from_pretrained_mistral = PeftModel.from_pretrained(model_from_pretrained_mistral, tmp_dirname)
# check if the state dicts are equal
state_dict = get_peft_model_state_dict(model_mistral)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained_mistral)
# check if same keys
assert state_dict.keys() == state_dict_from_pretrained.keys()
# Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
assert len(state_dict) == 4
# check if tensors equal
for key in state_dict.keys():
assert torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
# check if `adapter_model.bin` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))
# check if `adapter_config.json` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
# check if `model.safetensors` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
# check if `config.json` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
def test_save_pretrained(self) -> None:
seed = 420
torch.manual_seed(seed)
model = LlamaForCausalLM(self._create_test_llama_config())
config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config)
model = model.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
torch.manual_seed(seed)
model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config())
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
# check if the state dicts are equal
state_dict = get_peft_model_state_dict(model)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)
# check if same keys
assert state_dict.keys() == state_dict_from_pretrained.keys()
# Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
assert len(state_dict) == 4
# check if tensors equal
for key in state_dict.keys():
assert torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
# check if `adapter_model.bin` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))
# check if `adapter_config.json` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
# check if `model.safetensors` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
# check if `config.json` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_save_pretrained_mistral(self) -> None:
seed = 420
torch.manual_seed(seed)
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model_mistral = get_peft_model(model_mistral, config_mistral)
model_mistral = model_mistral.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
model_mistral.save_pretrained(tmp_dirname)
torch.manual_seed(seed)
model_from_pretrained_mistral = MistralForCausalLM(self._create_test_mistral_config())
model_from_pretrained_mistral = PeftModel.from_pretrained(model_from_pretrained_mistral, tmp_dirname)
# check if the state dicts are equal
state_dict = get_peft_model_state_dict(model_mistral)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained_mistral)
# check if same keys
assert state_dict.keys() == state_dict_from_pretrained.keys()
# Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
assert len(state_dict) == 4
# check if tensors equal
for key in state_dict.keys():
assert torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
# check if `adapter_model.bin` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))
# check if `adapter_config.json` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
# check if `model.safetensors` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
# check if `config.json` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
def test_save_pretrained_selected_adapters(self) -> None:
seed = 420
torch.manual_seed(seed)
model = LlamaForCausalLM(self._create_test_llama_config())
config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config)
model = model.to(self.torch_device)
new_adapter_config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model.add_adapter("new_adapter", new_adapter_config)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
torch.manual_seed(seed)
model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config())
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
model_from_pretrained.load_adapter(tmp_dirname, "new_adapter")
# check if the state dicts are equal
state_dict = get_peft_model_state_dict(model)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained)
# check if same keys
assert state_dict.keys() == state_dict_from_pretrained.keys()
# Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
assert len(state_dict) == 4
# check if tensors equal
for key in state_dict.keys():
assert torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
# check if `adapter_model.bin` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))
# check if `adapter_config.json` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
# check if `model.safetensors` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
# check if `config.json` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_save_pretrained_selected_adapters_mistral(self) -> None:
seed = 420
torch.manual_seed(seed)
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model_mistral = get_peft_model(model_mistral, config_mistral)
model_mistral = model_mistral.to(self.torch_device)
new_adapter_config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model_mistral.add_adapter("new_adapter", new_adapter_config_mistral)
with tempfile.TemporaryDirectory() as tmp_dirname:
model_mistral.save_pretrained(tmp_dirname)
torch.manual_seed(seed)
model_from_pretrained_mistral = MistralForCausalLM(self._create_test_mistral_config())
model_from_pretrained_mistral = PeftModel.from_pretrained(model_from_pretrained_mistral, tmp_dirname)
model_from_pretrained_mistral.load_adapter(tmp_dirname, "new_adapter")
# check if the state dicts are equal
state_dict = get_peft_model_state_dict(model_mistral)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained_mistral)
# check if same keys
assert state_dict.keys() == state_dict_from_pretrained.keys()
# Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate).
assert len(state_dict) == 4
# check if tensors equal
for key in state_dict.keys():
assert torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
# check if `adapter_model.bin` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors"))
# check if `adapter_config.json` is present
assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))
# check if `model.safetensors` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors"))
# check if `config.json` is not present
assert not os.path.exists(os.path.join(tmp_dirname, "config.json"))
def test_generate(self) -> None:
model = LlamaForCausalLM(self._create_test_llama_config())
config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config)
model = model.to(self.torch_device)
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# check if `generate` works
_ = model.generate(input_ids=input_ids, attention_mask=attention_mask)
# check if `generate` works if positional arguments are passed
_ = model.generate(input_ids, attention_mask=attention_mask)
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_generate_mistral(self) -> None:
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
config_mistral = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
model_mistral = get_peft_model(model_mistral, config_mistral)
model_mistral = model_mistral.to(self.torch_device)
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# check if `generate` works
_ = model_mistral.generate(input_ids=input_ids, attention_mask=attention_mask)
# check if `generate` works if positional arguments are passed
_ = model_mistral.generate(input_ids, attention_mask=attention_mask)
def test_sequence_adapter_ops(self) -> None:
"""Test sequence of adapter operations."""
# Test input data.
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# Create original llama model.
original = LlamaForCausalLM(self._create_test_llama_config())
original = original.to(self.torch_device)
original_before = original(input_ids=input_ids, attention_mask=attention_mask)
# Get AdaptionPrompt model.
adapted = get_peft_model(
original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted = adapted.to(self.torch_device)
default_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
# Test zero-init: The logits should be exactly the same.
assert_close(original_before.logits, default_before.logits, rtol=0, atol=0)
# Single fine-tuning step on "default" adapter.
optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
optimizer.zero_grad()
default_before.loss.backward()
optimizer.step()
# Test that the output changed.
default_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert not torch.allclose(default_before.logits, default_after.logits)
with adapted.disable_adapter():
# Test that the output is the same as the original output.
default_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0)
# Add new adapter 1.
adapted.add_adapter("adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM"))
# Test zero-init
adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)
# Single fine-tuning step on adapter 1.
optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
optimizer.zero_grad()
adapter_1_before.loss.backward()
optimizer.step()
# Test that adapter 1 output changed.
adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert not torch.allclose(adapter_1_before.logits, adapter_1_after.logits)
assert not torch.allclose(original_before.logits, adapter_1_after.logits)
assert not torch.allclose(default_after.logits, adapter_1_after.logits)
with adapted.disable_adapter():
# Test that the output is the same as the original output.
adapter_1_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0)
# Set adapter back to default.
adapted.set_adapter("default")
# Test that the output is the same as the default output after training.
default_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0)
assert not torch.allclose(original_before.logits, default_after_set.logits)
assert not torch.allclose(adapter_1_after.logits, default_after_set.logits)
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_sequence_adapter_ops_mistral(self) -> None:
# Test input data.
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# Create original mistral model.
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
model_mistral = model_mistral.to(self.torch_device)
original_before = model_mistral(input_ids=input_ids, attention_mask=attention_mask)
# Get AdaptionPrompt model.
adapted_mistral = get_peft_model(
model_mistral, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted_mistral = adapted_mistral.to(self.torch_device)
default_before = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
# Test zero-init: The logits should be exactly the same.
assert_close(original_before.logits, default_before.logits, rtol=0, atol=0)
# Single fine-tuning step on "default" adapter.
optimizer = torch.optim.SGD(adapted_mistral.parameters(), lr=1)
optimizer.zero_grad()
default_before.loss.backward()
optimizer.step()
# Test that the output changed.
default_after = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert not torch.allclose(default_before.logits, default_after.logits)
with adapted_mistral.disable_adapter():
# Test that the output is the same as the original output.
default_disabled = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0)
# Add new adapter 1.
adapted_mistral.add_adapter(
"adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM")
)
# Test zero-init
adapter_1_before = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)
# Single fine-tuning step on adapter 1.
optimizer = torch.optim.SGD(adapted_mistral.parameters(), lr=1)
optimizer.zero_grad()
adapter_1_before.loss.backward()
optimizer.step()
# Test that adapter 1 output changed.
adapter_1_after = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert not torch.allclose(adapter_1_before.logits, adapter_1_after.logits)
assert not torch.allclose(original_before.logits, adapter_1_after.logits)
assert not torch.allclose(default_after.logits, adapter_1_after.logits)
with adapted_mistral.disable_adapter():
# Test that the output is the same as the original output.
adapter_1_disabled = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0)
# Set adapter back to default.
adapted_mistral.set_adapter("default")
# Test that the output is the same as the default output after training.
default_after_set = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0)
assert not torch.allclose(original_before.logits, default_after_set.logits)
assert not torch.allclose(adapter_1_after.logits, default_after_set.logits)
def test_add_and_set_while_disabled(self):
"""Test that adding and setting adapters while disabled works as intended."""
# Test input data.
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# Create original llama model.
original = LlamaForCausalLM(self._create_test_llama_config())
original = original.to(self.torch_device)
original_before = original(input_ids=input_ids, attention_mask=attention_mask)
# Get AdaptionPrompt model.
adapted = get_peft_model(
original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted = adapted.to(self.torch_device)
with adapted.disable_adapter():
adapted.add_adapter(
"adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM")
)
# Test that the output is the same as the original output.
adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)
# Single fine-tuning step on adapter 1.
optimizer = torch.optim.SGD(adapted.parameters(), lr=1)
optimizer.zero_grad()
adapter_1_before.loss.backward()
optimizer.step()
# Test that adapter 1 output changed.
adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert not torch.allclose(original_before.logits, adapter_1_after.logits)
adapted.set_adapter("default")
with adapted.disable_adapter():
adapted.set_adapter("adapter 1")
# Test that adapter 1 is active again.
adapter_1_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0)
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_add_and_set_while_disabled_mistral(self):
# Test input data.
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# Create original mistral model.
model_mistral = MistralForCausalLM(self._create_test_mistral_config())
model_mistral = model_mistral.to(self.torch_device)
original_before = model_mistral(input_ids=input_ids, attention_mask=attention_mask)
# Get AdaptionPrompt model.
adapted_mistral = get_peft_model(
model_mistral, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted_mistral = adapted_mistral.to(self.torch_device)
with adapted_mistral.disable_adapter():
adapted_mistral.add_adapter(
"adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM")
)
# Test that the output is the same as the original output.
adapter_1_before = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0)
# Single fine-tuning step on adapter 1.
optimizer = torch.optim.SGD(adapted_mistral.parameters(), lr=1)
optimizer.zero_grad()
adapter_1_before.loss.backward()
optimizer.step()
# Test that adapter 1 output changed.
adapter_1_after = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert not torch.allclose(original_before.logits, adapter_1_after.logits)
adapted_mistral.set_adapter("default")
with adapted_mistral.disable_adapter():
adapted_mistral.set_adapter("adapter 1")
# Test that adapter 1 is active again.
adapter_1_after_set = adapted_mistral(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids)
assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0)
def test_use_cache(self) -> None:
"""Test that AdaptionPrompt works when Llama config use_cache=True."""
torch.manual_seed(0)
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
original = LlamaForCausalLM(
LlamaConfig(
vocab_size=16,
hidden_size=8,
intermediate_size=8,
num_hidden_layers=8,
num_attention_heads=4,
use_cache=False,
)
).eval()
adapted = get_peft_model(
original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted = adapted.to(self.torch_device)
expected = adapted.generate(input_ids=input_ids, max_length=8)
# Set use_cache = True and generate output again.
adapted.base_model.config.use_cache = True
actual = adapted.generate(input_ids=input_ids, max_length=8)
assert_close(expected, actual, rtol=0, atol=0)
@unittest.skipIf(not is_mistral_available(), "Mistral is not available")
def test_use_cache_mistral(self) -> None:
torch.manual_seed(0)
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
original = MistralForCausalLM(
MistralConfig(
vocab_size=16,
hidden_size=8,
intermediate_size=8,
num_hidden_layers=8,
num_attention_heads=4,
num_key_value_heads=2,
use_cache=False,
)
).eval()
adapted = get_peft_model(
original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted = adapted.to(self.torch_device)
expected = adapted.generate(input_ids=input_ids, max_length=8)
# Set use_cache = True and generate output again.
adapted.base_model.config.use_cache = True
actual = adapted.generate(input_ids=input_ids, max_length=8)
assert_close(expected, actual, rtol=0, atol=0)
    def test_bf16_inference(self) -> None:
        """Test that AdaptionPrompt works with a half-precision (bfloat16) Llama model."""
        if self.torch_device == "mps":
            pytest.skip("Skipping bf16 test on MPS")
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
original = LlamaForCausalLM.from_pretrained(
"trl-internal-testing/tiny-random-LlamaForCausalLM", torch_dtype=torch.bfloat16
)
adapted = get_peft_model(
original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
)
adapted = adapted.to(self.torch_device)
_ = adapted.generate(input_ids=input_ids)
@unittest.expectedFailure
def test_disable_adapter(self):
llama_config = self._create_test_llama_config()
model = LlamaForCausalLM(llama_config).to(self.torch_device)
dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device)
output_before = model(dummy_input).logits
config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM")
model = get_peft_model(model, config).to(self.torch_device)
output_peft = model(dummy_input).logits
# TODO currently this fails because scores are zeroed out:
# https://github.com/huggingface/peft/blob/062d95a09eb5d1de35c0e5e23d4387daba99e2db/src/peft/tuners/adaption_prompt.py#L303
# This is fine for users but makes it difficult to test if anything happens. In the future, we will have a clean
# way to control initialization. Until then, this test is expected to fail.
assert not torch.allclose(output_before, output_peft)
with model.disable_adapter():
output_peft_disabled = model(dummy_input).logits
assert torch.allclose(output_before, output_peft_disabled)
| peft/tests/test_adaption_prompt.py/0 | {
"file_path": "peft/tests/test_adaption_prompt.py",
"repo_id": "peft",
"token_count": 16295
} | 165 |
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel, PolyConfig, TaskType, get_peft_model
class TestPoly(unittest.TestCase):
def test_poly(self):
torch.manual_seed(0)
model_name_or_path = "google/flan-t5-small"
atol, rtol = 1e-6, 1e-6
r = 8 # rank of lora in poly
n_tasks = 3 # number of tasks
n_skills = 2 # number of skills (loras)
n_splits = 4 # number of heads
lr = 1e-2
num_epochs = 10
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
peft_config = PolyConfig(
task_type=TaskType.SEQ_2_SEQ_LM,
poly_type="poly",
r=r,
n_tasks=n_tasks,
n_skills=n_skills,
n_splits=n_splits,
)
model = get_peft_model(base_model, peft_config)
# generate some dummy data
text = os.__doc__.splitlines()
assert len(text) > 10
inputs = tokenizer(text, return_tensors="pt", padding=True)
inputs["task_ids"] = torch.arange(len(text)) % n_tasks
inputs["labels"] = tokenizer((["A", "B"] * 100)[: len(text)], return_tensors="pt")["input_ids"]
# simple training loop
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
losses = []
for _ in range(num_epochs):
outputs = model(**inputs)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
losses.append(loss.item())
# loss improved by at least 50%
assert losses[-1] < (0.5 * losses[0])
# check that saving and loading works
torch.manual_seed(0)
model.eval()
logits_before = model(**inputs).logits
tokens_before = model.generate(**inputs)
with model.disable_adapter():
logits_disabled = model(**inputs).logits
tokens_disabled = model.generate(**inputs)
assert not torch.allclose(logits_before, logits_disabled, atol=atol, rtol=rtol)
assert not torch.allclose(tokens_before, tokens_disabled, atol=atol, rtol=rtol)
# saving and loading
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
loaded = PeftModel.from_pretrained(base_model, tmp_dir)
torch.manual_seed(0)
output_after = loaded(**inputs).logits
tokens_after = loaded.generate(**inputs)
assert torch.allclose(logits_before, output_after, atol=atol, rtol=rtol)
assert torch.allclose(tokens_before, tokens_after, atol=atol, rtol=rtol)
| peft/tests/test_poly.py/0 | {
"file_path": "peft/tests/test_poly.py",
"repo_id": "peft",
"token_count": 1541
} | 166 |
#!/usr/bin/env python3
""" Checkpoint Cleaning Script
Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc.
and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
calculation for model zoo compatibility.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse
import os
import hashlib
import shutil
import tempfile
from timm.models import load_state_dict
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='output path')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
                    help='do not use the EMA version of weights even if present')
parser.add_argument('--no-hash', dest='no_hash', action='store_true',
help='no hash in output filename')
parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true',
help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')
parser.add_argument('--safetensors', action='store_true',
help='Save weights using safetensors instead of the default torch way (pickle).')
def main():
args = parser.parse_args()
if os.path.exists(args.output):
print("Error: Output filename ({}) already exists.".format(args.output))
exit(1)
clean_checkpoint(
args.checkpoint,
args.output,
not args.no_use_ema,
args.no_hash,
args.clean_aux_bn,
safe_serialization=args.safetensors,
)
def clean_checkpoint(
checkpoint,
output,
use_ema=True,
no_hash=False,
clean_aux_bn=False,
safe_serialization: bool=False,
):
# Load an existing checkpoint to CPU, strip everything but the state_dict and re-save
if checkpoint and os.path.isfile(checkpoint):
print("=> Loading checkpoint '{}'".format(checkpoint))
state_dict = load_state_dict(checkpoint, use_ema=use_ema)
new_state_dict = {}
for k, v in state_dict.items():
if clean_aux_bn and 'aux_bn' in k:
# If all aux_bn keys are removed, the SplitBN layers will end up as normal and
# load with the unmodified model using BatchNorm2d.
continue
name = k[7:] if k.startswith('module.') else k
new_state_dict[name] = v
print("=> Loaded state_dict from '{}'".format(checkpoint))
ext = ''
if output:
checkpoint_root, checkpoint_base = os.path.split(output)
checkpoint_base, ext = os.path.splitext(checkpoint_base)
else:
checkpoint_root = ''
checkpoint_base = os.path.split(checkpoint)[1]
checkpoint_base = os.path.splitext(checkpoint_base)[0]
temp_filename = '__' + checkpoint_base
if safe_serialization:
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
safetensors.torch.save_file(new_state_dict, temp_filename)
else:
torch.save(new_state_dict, temp_filename)
with open(temp_filename, 'rb') as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
if ext:
final_ext = ext
else:
final_ext = ('.safetensors' if safe_serialization else '.pth')
if no_hash:
final_filename = checkpoint_base + final_ext
else:
final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + final_ext
shutil.move(temp_filename, os.path.join(checkpoint_root, final_filename))
print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash))
return final_filename
else:
print("Error: Checkpoint ({}) doesn't exist".format(checkpoint))
return ''
if __name__ == '__main__':
main()
| pytorch-image-models/clean_checkpoint.py/0 | {
"file_path": "pytorch-image-models/clean_checkpoint.py",
"repo_id": "pytorch-image-models",
"token_count": 1771
} | 167 |
# CSP-DarkNet
**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network.
This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4).
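As a quick, illustrative sketch (assuming a recent `timm` release is installed; the included snippets below cover usage in more detail), the `cspdarknet53` weights listed on this page can be loaded by name:

```python
import torch
import timm

# Create the model with its pretrained ImageNet weights (name taken from the metadata below).
model = timm.create_model('cspdarknet53', pretrained=True).eval()

# Forward a dummy batch at the 256x256 resolution noted in the metadata.
with torch.no_grad():
    logits = model(torch.randn(1, 3, 256, 256))
print(logits.shape)  # torch.Size([1, 1000])
```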
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{bochkovskiy2020yolov4,
title={YOLOv4: Optimal Speed and Accuracy of Object Detection},
author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao},
year={2020},
eprint={2004.10934},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: CSP DarkNet
Paper:
Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection'
URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object
Models:
- Name: cspdarknet53
In Collection: CSP DarkNet
Metadata:
FLOPs: 8545018880
Parameters: 27640000
File Size: 110775135
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Mish
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- CutMix
- Label Smoothing
- Mosaic
- Polynomial Learning Rate Decay
- SGD with Momentum
- Self-Adversarial Training
- Weight Decay
Training Data:
- ImageNet
Training Resources: 1x NVIDIA RTX 2070 GPU
ID: cspdarknet53
LR: 0.1
Layers: 53
Crop Pct: '0.887'
Momentum: 0.9
Batch Size: 128
Image Size: '256'
Warmup Steps: 1000
Weight Decay: 0.0005
Interpolation: bilinear
Training Steps: 8000000
FPS (GPU RTX 2070): 66
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.05%
Top 5 Accuracy: 95.09%
-->
| pytorch-image-models/docs/models/.templates/models/csp-darknet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/csp-darknet.md",
"repo_id": "pytorch-image-models",
"token_count": 947
} | 168 |
# (Gluon) SE-ResNeXt
**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
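As a brief, illustrative sketch (assuming `timm` is installed; fuller usage is shown in the included snippets below), the variants listed on this page can be discovered and instantiated by name:

```python
import timm

# List the Gluon SE-ResNeXt variants that ship with pretrained weights.
print(timm.list_models('gluon_seresnext*', pretrained=True))

# Instantiate one of them with the ported Gluon weights and count its parameters.
model = timm.create_model('gluon_seresnext50_32x4d', pretrained=True).eval()
print(sum(p.numel() for p in model.parameters()))
```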
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{hu2019squeezeandexcitation,
title={Squeeze-and-Excitation Networks},
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
year={2019},
eprint={1709.01507},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Gluon SEResNeXt
Paper:
Title: Squeeze-and-Excitation Networks
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: gluon_seresnext101_32x4d
  In Collection: Gluon SEResNeXt
Metadata:
FLOPs: 10302923504
Parameters: 48960000
File Size: 196505510
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_seresnext101_32x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L219
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.87%
Top 5 Accuracy: 95.29%
- Name: gluon_seresnext101_64x4d
  In Collection: Gluon SEResNeXt
Metadata:
FLOPs: 19958950640
Parameters: 88230000
File Size: 353875948
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_seresnext101_64x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L229
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.88%
Top 5 Accuracy: 95.31%
- Name: gluon_seresnext50_32x4d
  In Collection: Gluon SEResNeXt
Metadata:
FLOPs: 5475179184
Parameters: 27560000
File Size: 110578827
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_seresnext50_32x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L209
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.92%
Top 5 Accuracy: 94.82%
-->
| pytorch-image-models/docs/models/.templates/models/gloun-seresnext.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/gloun-seresnext.md",
"repo_id": "pytorch-image-models",
"token_count": 1705
} | 169 |
# PNASNet
**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go.
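A minimal sketch (assuming `timm` is installed; see the included snippets below for full usage) of building `pnasnet5large` together with the matching eval preprocessing, which uses the 331px input size and 0.911 crop percentage listed in the metadata:

```python
import timm
from timm.data import resolve_data_config, create_transform

model = timm.create_model('pnasnet5large', pretrained=True).eval()

# Derive the eval transform (resize/crop/normalize) from the model's pretrained config.
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
print(config['input_size'], config['crop_pct'])  # (3, 331, 331) 0.911
```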
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{liu2018progressive,
title={Progressive Neural Architecture Search},
author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy},
year={2018},
eprint={1712.00559},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: PNASNet
Paper:
Title: Progressive Neural Architecture Search
URL: https://paperswithcode.com/paper/progressive-neural-architecture-search
Models:
- Name: pnasnet5large
In Collection: PNASNet
Metadata:
FLOPs: 31458865950
Parameters: 86060000
File Size: 345153926
Architecture:
- Average Pooling
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- ReLU
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 100x NVIDIA P100 GPUs
ID: pnasnet5large
LR: 0.015
Dropout: 0.5
Crop Pct: '0.911'
Momentum: 0.9
Batch Size: 1600
Image Size: '331'
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/pnasnet.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 0.98%
Top 5 Accuracy: 18.58%
-->
| pytorch-image-models/docs/models/.templates/models/pnasnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/pnasnet.md",
"repo_id": "pytorch-image-models",
"token_count": 813
} | 170 |
# SSL ResNet
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form networks: e.g. a ResNet-50 has fifty layers using these blocks.
The models in this collection utilise semi-supervised learning to improve model performance. The approach brings important gains to standard architectures for image, video and fine-grained classification.
Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
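For illustration (assuming `timm` is installed; note that the license above applies to the downloaded weights), a short sketch using `ssl_resnet50` either as a classifier or as a multi-scale feature backbone:

```python
import torch
import timm

# Classifier with the semi-supervised pretrained weights.
model = timm.create_model('ssl_resnet50', pretrained=True).eval()
with torch.no_grad():
    probs = model(torch.randn(1, 3, 224, 224)).softmax(-1)
print(probs.topk(5).indices)

# The same weights as a feature extractor (e.g. for detection or segmentation heads).
backbone = timm.create_model('ssl_resnet50', pretrained=True, features_only=True)
print([f.shape for f in backbone(torch.randn(1, 3, 224, 224))])
```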
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-00546,
author = {I. Zeki Yalniz and
Herv{\'{e}} J{\'{e}}gou and
Kan Chen and
Manohar Paluri and
Dhruv Mahajan},
title = {Billion-scale semi-supervised learning for image classification},
journal = {CoRR},
volume = {abs/1905.00546},
year = {2019},
url = {http://arxiv.org/abs/1905.00546},
archivePrefix = {arXiv},
eprint = {1905.00546},
timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: SSL ResNet
Paper:
Title: Billion-scale semi-supervised learning for image classification
URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
Models:
- Name: ssl_resnet18
In Collection: SSL ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46811375
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- YFCC-100M
Training Resources: 64x GPUs
ID: ssl_resnet18
LR: 0.0015
Epochs: 30
Layers: 18
Crop Pct: '0.875'
Batch Size: 1536
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L894
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.62%
Top 5 Accuracy: 91.42%
- Name: ssl_resnet50
In Collection: SSL ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102480594
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- YFCC-100M
Training Resources: 64x GPUs
ID: ssl_resnet50
LR: 0.0015
Epochs: 30
Layers: 50
Crop Pct: '0.875'
Batch Size: 1536
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L904
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.24%
Top 5 Accuracy: 94.83%
-->
| pytorch-image-models/docs/models/.templates/models/ssl-resnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/ssl-resnet.md",
"repo_id": "pytorch-image-models",
"token_count": 1616
} | 171 |
# Scripts
Train, validation, inference, and checkpoint cleaning scripts are included in the github root folder. Scripts are not currently packaged in the pip release.
The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on
[NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples).
## Training Script
The variety of training args is large and not all combinations of options (or even individual options) have been fully tested. For the training dataset folder, specify the base folder that contains `train` and `validation` folders.
To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value:
`./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4`
NOTE: It is recommended to use PyTorch 1.9+ w/ PyTorch native AMP and DDP instead of APEX AMP. `--amp` defaults to native AMP as of timm ver 0.4.3. `--apex-amp` will force use of APEX components if they are installed.
## Validation / Inference Scripts
Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs topk class ids in a csv. Specify the folder containing the validation images, not the base folder as in the training script.
To validate with the model's pretrained weights (if they exist):
`python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained`
To run inference from a checkpoint:
`python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar` | pytorch-image-models/docs/scripts.md/0 | {
"file_path": "pytorch-image-models/docs/scripts.md",
"repo_id": "pytorch-image-models",
"token_count": 511
} | 172 |
#!/usr/bin/env python3
"""PyTorch Inference Script
An example inference script that outputs top-k class ids for images in a folder into a csv.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import json
import logging
import os
import time
from contextlib import suppress
from functools import partial
import numpy as np
import pandas as pd
import torch
from timm.data import create_dataset, create_loader, resolve_data_config, ImageNetInfo, infer_imagenet_subset
from timm.layers import apply_test_time_pool
from timm.models import create_model
from timm.utils import AverageMeter, setup_default_logging, set_jit_fuser, ParseKwargs
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_FMT_EXT = {
'json': '.json',
'json-record': '.json',
'json-split': '.json',
'parquet': '.parquet',
'csv': '.csv',
}
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('inference')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='MODEL', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
                    help='Number of classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-dir', type=str, default=None,
help='folder for output results')
parser.add_argument('--results-file', type=str, default=None,
help='results filename (relative to results-dir)')
parser.add_argument('--results-format', type=str, nargs='+', default=['csv'],
help='results format (one of "csv", "json", "json-split", "parquet")')
parser.add_argument('--results-separate-col', action='store_true', default=False,
help='separate output columns per result index.')
parser.add_argument('--topk', default=1, type=int,
metavar='N', help='Top-k to output to CSV')
parser.add_argument('--fullname', action='store_true', default=False,
help='use full sample name in output (not just basename).')
parser.add_argument('--filename-col', type=str, default='filename',
help='name for filename / sample name column')
parser.add_argument('--index-col', type=str, default='index',
help='name for output indices column(s)')
parser.add_argument('--label-col', type=str, default='label',
help='name for output indices column(s)')
parser.add_argument('--output-col', type=str, default=None,
help='name for logit/probs output column(s)')
parser.add_argument('--output-type', type=str, default='prob',
                    help='output type column ("prob" for probabilities, "logit" for raw logits)')
parser.add_argument('--label-type', type=str, default='description',
                    help='type of label to output, one of "none", "name", "description", "detail"')
parser.add_argument('--include-index', action='store_true', default=False,
help='include the class index in results')
parser.add_argument('--exclude-output', action='store_true', default=False,
help='exclude logits/probs from results, just indices. topk must be set !=0.')
def main():
setup_default_logging()
args = parser.parse_args()
# might as well try to do something useful...
args.pretrained = args.pretrained or not args.checkpoint
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
# resolve AMP arguments based on PyTorch / Apex availability
amp_autocast = suppress
if args.amp:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
assert args.amp_dtype in ('float16', 'bfloat16')
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Running inference in mixed precision with native PyTorch AMP.')
else:
_logger.info('Running inference in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=in_chans,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
_logger.info(
f'Model {args.model} created, param count: {sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device)
model.eval()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
model = torch.jit.script(model)
elif args.torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
root_dir = args.data or args.data_dir
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
class_map=args.class_map,
)
if test_time_pool:
data_config['crop_pct'] = 1.0
workers = 1 if 'tfds' in args.dataset or 'wds' in args.dataset else args.workers
loader = create_loader(
dataset,
batch_size=args.batch_size,
use_prefetcher=True,
num_workers=workers,
**data_config,
)
to_label = None
if args.label_type in ('name', 'description', 'detail'):
imagenet_subset = infer_imagenet_subset(model)
if imagenet_subset is not None:
dataset_info = ImageNetInfo(imagenet_subset)
if args.label_type == 'name':
to_label = lambda x: dataset_info.index_to_label_name(x)
elif args.label_type == 'detail':
to_label = lambda x: dataset_info.index_to_description(x, detailed=True)
else:
to_label = lambda x: dataset_info.index_to_description(x)
to_label = np.vectorize(to_label)
else:
_logger.error("Cannot deduce ImageNet subset from model, no labelling will be performed.")
top_k = min(args.topk, args.num_classes)
batch_time = AverageMeter()
end = time.time()
all_indices = []
all_labels = []
all_outputs = []
use_probs = args.output_type == 'prob'
with torch.no_grad():
for batch_idx, (input, _) in enumerate(loader):
with amp_autocast():
output = model(input)
if use_probs:
output = output.softmax(-1)
if top_k:
output, indices = output.topk(top_k)
np_indices = indices.cpu().numpy()
if args.include_index:
all_indices.append(np_indices)
if to_label is not None:
np_labels = to_label(np_indices)
all_labels.append(np_labels)
all_outputs.append(output.cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, len(loader), batch_time=batch_time))
all_indices = np.concatenate(all_indices, axis=0) if all_indices else None
all_labels = np.concatenate(all_labels, axis=0) if all_labels else None
all_outputs = np.concatenate(all_outputs, axis=0).astype(np.float32)
filenames = loader.dataset.filenames(basename=not args.fullname)
output_col = args.output_col or ('prob' if use_probs else 'logit')
data_dict = {args.filename_col: filenames}
if args.results_separate_col and all_outputs.shape[-1] > 1:
if all_indices is not None:
for i in range(all_indices.shape[-1]):
data_dict[f'{args.index_col}_{i}'] = all_indices[:, i]
if all_labels is not None:
for i in range(all_labels.shape[-1]):
data_dict[f'{args.label_col}_{i}'] = all_labels[:, i]
for i in range(all_outputs.shape[-1]):
data_dict[f'{output_col}_{i}'] = all_outputs[:, i]
else:
if all_indices is not None:
if all_indices.shape[-1] == 1:
all_indices = all_indices.squeeze(-1)
data_dict[args.index_col] = list(all_indices)
if all_labels is not None:
if all_labels.shape[-1] == 1:
all_labels = all_labels.squeeze(-1)
data_dict[args.label_col] = list(all_labels)
if all_outputs.shape[-1] == 1:
all_outputs = all_outputs.squeeze(-1)
data_dict[output_col] = list(all_outputs)
df = pd.DataFrame(data=data_dict)
results_filename = args.results_file
if results_filename:
filename_no_ext, ext = os.path.splitext(results_filename)
if ext and ext in _FMT_EXT.values():
# if filename provided with one of expected ext,
# remove it as it will be added back
results_filename = filename_no_ext
else:
# base default filename on model name + img-size
img_size = data_config["input_size"][1]
results_filename = f'{args.model}-{img_size}'
if args.results_dir:
results_filename = os.path.join(args.results_dir, results_filename)
for fmt in args.results_format:
save_results(df, results_filename, fmt)
print(f'--result')
print(df.set_index(args.filename_col).to_json(orient='index', indent=4))
def save_results(df, results_filename, results_format='csv', filename_col='filename'):
results_filename += _FMT_EXT[results_format]
if results_format == 'parquet':
df.set_index(filename_col).to_parquet(results_filename)
elif results_format == 'json':
df.set_index(filename_col).to_json(results_filename, indent=4, orient='index')
    elif results_format == 'json-record':
df.to_json(results_filename, lines=True, orient='records')
elif results_format == 'json-split':
df.to_json(results_filename, indent=4, orient='split', index=False)
else:
df.to_csv(results_filename, index=False)
if __name__ == '__main__':
main()
| pytorch-image-models/inference.py/0 | {
"file_path": "pytorch-image-models/inference.py",
"repo_id": "pytorch-image-models",
"token_count": 6803
} | 173 |
[dist_conda]
conda_name_differences = 'torch:pytorch'
channels = pytorch
noarch = True
[metadata]
url = "https://github.com/huggingface/pytorch-image-models" | pytorch-image-models/setup.cfg/0 | {
"file_path": "pytorch-image-models/setup.cfg",
"repo_id": "pytorch-image-models",
"token_count": 65
} | 174 |
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union
class DatasetInfo(ABC):
def __init__(self):
pass
@abstractmethod
def num_classes(self):
pass
@abstractmethod
def label_names(self):
pass
@abstractmethod
def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]:
pass
@abstractmethod
def index_to_label_name(self, index) -> str:
pass
@abstractmethod
def index_to_description(self, index: int, detailed: bool = False) -> str:
pass
@abstractmethod
def label_name_to_description(self, label: str, detailed: bool = False) -> str:
pass
class CustomDatasetInfo(DatasetInfo):
""" DatasetInfo that wraps passed values for custom datasets."""
def __init__(
self,
label_names: Union[List[str], Dict[int, str]],
label_descriptions: Optional[Dict[str, str]] = None
):
super().__init__()
assert len(label_names) > 0
self._label_names = label_names # label index => label name mapping
self._label_descriptions = label_descriptions # label name => label description mapping
if self._label_descriptions is not None:
# validate descriptions (label names required)
assert isinstance(self._label_descriptions, dict)
for n in self._label_names:
assert n in self._label_descriptions
def num_classes(self):
return len(self._label_names)
def label_names(self):
return self._label_names
def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]:
return self._label_descriptions
def label_name_to_description(self, label: str, detailed: bool = False) -> str:
if self._label_descriptions:
return self._label_descriptions[label]
return label # return label name itself if a descriptions is not present
def index_to_label_name(self, index) -> str:
assert 0 <= index < len(self._label_names)
return self._label_names[index]
def index_to_description(self, index: int, detailed: bool = False) -> str:
label = self.index_to_label_name(index)
return self.label_name_to_description(label, detailed=detailed)
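# Illustrative use (hypothetical labels):
#   info = CustomDatasetInfo(label_names=['cat', 'dog'], label_descriptions={'cat': 'a small feline', 'dog': 'a canine'})
#   info.index_to_description(0)  # -> 'a small feline'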
| pytorch-image-models/timm/data/dataset_info.py/0 | {
"file_path": "pytorch-image-models/timm/data/dataset_info.py",
"repo_id": "pytorch-image-models",
"token_count": 941
} | 175 |
""" Dataset reader that wraps TFDS datasets
Wraps many (most?) TFDS image-classification datasets
from https://github.com/tensorflow/datasets
https://www.tensorflow.org/datasets/catalog/overview#image_classification
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import os
import sys
from typing import Optional
import torch
import torch.distributed as dist
from PIL import Image
try:
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu)
import tensorflow_datasets as tfds
try:
tfds.even_splits('', 1, drop_remainder=False) # non-buggy even_splits has drop_remainder arg
has_buggy_even_splits = False
except TypeError:
print("Warning: This version of tfds doesn't have the latest even_splits impl. "
"Please update or use tfds-nightly for better fine-grained split behaviour.")
has_buggy_even_splits = True
# NOTE uncomment below if having file limit issues on dataset build (or alter your OS defaults)
# import resource
# low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
# resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
except ImportError as e:
print(e)
print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.")
raise e
from .class_map import load_class_map
from .reader import Reader
from .shared_count import SharedCount
MAX_TP_SIZE = int(os.environ.get('TFDS_TP_SIZE', 8)) # maximum TF threadpool size, for jpeg decodes and queuing activities
SHUFFLE_SIZE = int(os.environ.get('TFDS_SHUFFLE_SIZE', 8192)) # samples to shuffle in DS queue
PREFETCH_SIZE = int(os.environ.get('TFDS_PREFETCH_SIZE', 2048)) # samples to prefetch
@tfds.decode.make_decoder()
def decode_example(serialized_image, feature, dct_method='INTEGER_ACCURATE', channels=3):
return tf.image.decode_jpeg(
serialized_image,
channels=channels,
dct_method=dct_method,
)
def even_split_indices(split, n, num_samples):
partitions = [round(i * num_samples / n) for i in range(n + 1)]
return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)]
def get_class_labels(info):
if 'label' not in info.features:
return {}
class_label = info.features['label']
class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
return class_to_idx
class ReaderTfds(Reader):
""" Wrap Tensorflow Datasets for use in PyTorch
    There are several things to be aware of:
* To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of
dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last
https://github.com/pytorch/pytorch/issues/33413
* With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch
from each worker could be a different size. For training this is worked around by option above, for
validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced
across replicas are of same size. This will slightly alter the results, distributed validation will not be
100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse
since there are up to N * J extra samples with IterableDatasets.
* The sharding (splitting of dataset into TFRecord) files imposes limitations on the number of
replicas and dataloader workers you can use. For really small datasets that only contain a few shards
you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the
benefit of distributed training or fast dataloading should be much less for small datasets.
* This wrapper is currently configured to return individual, decompressed image samples from the TFDS
dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible
to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream
components.
"""
def __init__(
self,
name,
root=None,
split='train',
class_map=None,
is_training=False,
batch_size=1,
download=False,
repeats=0,
seed=42,
input_key='image',
input_img_mode='RGB',
target_key='label',
target_img_mode='',
prefetch_size=None,
shuffle_size=None,
max_threadpool_size=None
):
""" Tensorflow-datasets Wrapper
Args:
root: root data dir (ie your TFDS_DATA_DIR. not dataset specific sub-dir)
name: tfds dataset name (eg `imagenet2012`)
split: tfds dataset split (can use all TFDS split strings eg `train[:10%]`)
is_training: training mode, shuffle enabled, dataset len rounded by batch_size
            batch_size: batch_size to use to ensure total samples % batch_size == 0 in training across all dist nodes
download: download and build TFDS dataset if set, otherwise must use tfds CLI
repeats: iterate through (repeat) the dataset this many times per iteration (once if 0 or 1)
seed: common seed for shard shuffle across all distributed/worker instances
input_key: name of Feature to return as data (input)
input_img_mode: image mode if input is an image (currently PIL mode string)
target_key: name of Feature to return as target (label)
target_img_mode: image mode if target is an image (currently PIL mode string)
prefetch_size: override default tf.data prefetch buffer size
shuffle_size: override default tf.data shuffle buffer size
max_threadpool_size: override default threadpool size for tf.data
"""
super().__init__()
self.root = root
self.split = split
self.is_training = is_training
self.batch_size = batch_size
self.repeats = repeats
self.common_seed = seed # a seed that's fixed across all worker / distributed instances
# performance settings
self.prefetch_size = prefetch_size or PREFETCH_SIZE
self.shuffle_size = shuffle_size or SHUFFLE_SIZE
self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE
# TFDS builder and split information
self.input_key = input_key # FIXME support tuples / lists of inputs and targets and full range of Feature
self.input_img_mode = input_img_mode
self.target_key = target_key
self.target_img_mode = target_img_mode # for dense pixel targets
self.builder = tfds.builder(name, data_dir=root)
        # NOTE: the tfds command line app can be used to download & prepare datasets if you don't enable download flag
if download:
self.builder.download_and_prepare()
self.remap_class = False
if class_map:
self.class_to_idx = load_class_map(class_map)
self.remap_class = True
else:
self.class_to_idx = get_class_labels(self.builder.info) if self.target_key == 'label' else {}
self.split_info = self.builder.info.splits[split]
self.num_samples = self.split_info.num_examples
# Distributed world state
self.dist_rank = 0
self.dist_num_replicas = 1
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
# Attributes that are updated in _lazy_init, including the tf.data pipeline itself
self.global_num_workers = 1
self.num_workers = 1
self.worker_info = None
self.worker_seed = 0 # seed unique to each work instance
self.subsplit = None # set when data is distributed across workers using sub-splits
self.ds = None # initialized lazily on each dataloader worker process
self.init_count = 0 # number of ds TF data pipeline initializations
self.epoch_count = SharedCount()
        # FIXME need to determine if reinit_each_iter is necessary. I don't completely trust the behaviour
# of `shuffle_reshuffle_each_iteration` when there are multiple workers / nodes across epochs
self.reinit_each_iter = self.is_training
def set_epoch(self, count):
self.epoch_count.value = count
def set_loader_cfg(
self,
num_workers: Optional[int] = None,
):
if self.ds is not None:
return
if num_workers is not None:
self.num_workers = num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
def _lazy_init(self):
""" Lazily initialize the dataset.
This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that
will be using the dataset instance. The __init__ method is called on the main process,
        whereas this method is called in a dataloader worker process.
NOTE: There will be problems if you try to re-use this dataset across different loader/worker
instances once it has been initialized. Do not call any dataset methods that can call _lazy_init
before it is passed to dataloader.
"""
worker_info = torch.utils.data.get_worker_info()
# setup input context to split dataset across distributed processes
num_workers = 1
global_worker_id = 0
if worker_info is not None:
self.worker_info = worker_info
self.worker_seed = worker_info.seed
self.num_workers = worker_info.num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
global_worker_id = self.dist_rank * self.num_workers + worker_info.id
""" Data sharding
InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.
        My understanding is that when using split, the underlying TFRecord files will shuffle (shuffle_files=True)
between the splits each iteration, but that understanding could be wrong.
I am currently using a mix of InputContext shard assignment and fine-grained sub-splits for distributing
the data across workers. For training InputContext is used to assign shards to nodes unless num_shards
in dataset < total number of workers. Otherwise sub-split API is used for datasets without enough shards or
        for validation where we can't drop samples and need to minimize uneven splits to avoid padding.
"""
should_subsplit = self.global_num_workers > 1 and (
self.split_info.num_shards < self.global_num_workers or not self.is_training)
if should_subsplit:
# split the dataset w/o using sharding for more even samples / worker, can result in less optimal
# read patterns for distributed training (overlap across shards) so better to use InputContext there
if has_buggy_even_splits:
# my even_split workaround doesn't work on subsplits, upgrade tfds!
if not isinstance(self.split_info, tfds.core.splits.SubSplitInfo):
subsplits = even_split_indices(self.split, self.global_num_workers, self.num_samples)
self.subsplit = subsplits[global_worker_id]
else:
subsplits = tfds.even_splits(self.split, self.global_num_workers)
self.subsplit = subsplits[global_worker_id]
input_context = None
if self.global_num_workers > 1 and self.subsplit is None:
# set input context to divide shards among distributed replicas
input_context = tf.distribute.InputContext(
num_input_pipelines=self.global_num_workers,
input_pipeline_id=global_worker_id,
num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact?
)
read_config = tfds.ReadConfig(
shuffle_seed=self.common_seed + self.epoch_count.value,
shuffle_reshuffle_each_iteration=True,
input_context=input_context,
)
ds = self.builder.as_dataset(
split=self.subsplit or self.split,
shuffle_files=self.is_training,
decoders=dict(image=decode_example(channels=1 if self.input_img_mode == 'L' else 3)),
read_config=read_config,
)
# avoid overloading threading w/ combo of TF ds threads + PyTorch workers
options = tf.data.Options()
thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading'
getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // self.num_workers)
getattr(options, thread_member).max_intra_op_parallelism = 1
ds = ds.with_options(options)
if self.is_training or self.repeats > 1:
# to prevent excessive drop_last batch behaviour w/ IterableDatasets
# see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading
ds = ds.repeat() # allow wrap around and break iteration manually
if self.is_training:
ds = ds.shuffle(min(self.num_samples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed)
ds = ds.prefetch(min(self.num_samples // self.global_num_workers, self.prefetch_size))
self.ds = tfds.as_numpy(ds)
self.init_count += 1
def _num_samples_per_worker(self):
num_worker_samples = \
max(1, self.repeats) * self.num_samples / max(self.global_num_workers, self.dist_num_replicas)
if self.is_training or self.dist_num_replicas > 1:
num_worker_samples = math.ceil(num_worker_samples)
if self.is_training:
num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size
return int(num_worker_samples)
def __iter__(self):
if self.ds is None or self.reinit_each_iter:
self._lazy_init()
# Compute a rounded up sample count that is used to:
# 1. make batches even cross workers & replicas in distributed validation.
# This adds extra samples and will slightly alter validation results.
# 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size
# batches are produced (underlying tfds iter wraps around)
target_sample_count = self._num_samples_per_worker()
# Iterate until exhausted or sample count hits target when training (ds.repeat enabled)
sample_count = 0
for sample in self.ds:
input_data = sample[self.input_key]
if self.input_img_mode:
if self.input_img_mode == 'L' and input_data.ndim == 3:
input_data = input_data[:, :, 0]
input_data = Image.fromarray(input_data, mode=self.input_img_mode)
target_data = sample[self.target_key]
if self.target_img_mode:
# dense pixel target
target_data = Image.fromarray(target_data, mode=self.target_img_mode)
elif self.remap_class:
target_data = self.class_to_idx[target_data]
yield input_data, target_data
sample_count += 1
if self.is_training and sample_count >= target_sample_count:
# Need to break out of loop when repeat() is enabled for training w/ oversampling
# this results in extra samples per epoch but seems more desirable than dropping
# up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes)
break
# Pad across distributed nodes (make counts equal by adding samples)
if not self.is_training and self.dist_num_replicas > 1 and self.subsplit is not None and \
0 < sample_count < target_sample_count:
# Validation batch padding only done for distributed training where results are reduced across nodes.
# For single process case, it won't matter if workers return different batch sizes.
# If using input_context or % based splits, sample count can vary significantly across workers and this
# approach should not be used (hence disabled if self.subsplit isn't set).
while sample_count < target_sample_count:
yield input_data, target_data # yield prev sample again
sample_count += 1
def __len__(self):
num_samples = self._num_samples_per_worker() * self.num_workers
return num_samples
def _filename(self, index, basename=False, absolute=False):
assert False, "Not supported" # no random access to samples
def filenames(self, basename=False, absolute=False):
""" Return all filenames in dataset, overrides base"""
if self.ds is None:
self._lazy_init()
names = []
for sample in self.ds:
if len(names) > self.num_samples:
break # safety for ds.repeat() case
if 'file_name' in sample:
name = sample['file_name']
elif 'filename' in sample:
name = sample['filename']
elif 'id' in sample:
name = sample['id']
else:
assert False, "No supported name field present"
names.append(name)
return names
| pytorch-image-models/timm/data/readers/reader_tfds.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader_tfds.py",
"repo_id": "pytorch-image-models",
"token_count": 7089
} | 176 |
""" CBAM (sort-of) Attention
Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521
WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on
some tasks, especially fine-grained ones it seems. I may end up removing this impl.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
import torch.nn.functional as F
from .conv_bn_act import ConvNormAct
from .create_act import create_act_layer, get_act_layer
from .helpers import make_divisible
class ChannelAttn(nn.Module):
""" Original CBAM channel attention module, currently avg + max pool variant only.
"""
def __init__(
self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
super(ChannelAttn, self).__init__()
if not rd_channels:
rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias)
self.act = act_layer(inplace=True)
self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True))))
x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True))))
return x * self.gate(x_avg + x_max)
class LightChannelAttn(ChannelAttn):
"""An experimental 'lightweight' that sums avg + max pool first
"""
def __init__(
self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
super(LightChannelAttn, self).__init__(
channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias)
def forward(self, x):
x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True)
x_attn = self.fc2(self.act(self.fc1(x_pool)))
        return x * self.gate(x_attn)
class SpatialAttn(nn.Module):
""" Original CBAM spatial attention module
"""
def __init__(self, kernel_size=7, gate_layer='sigmoid'):
super(SpatialAttn, self).__init__()
self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1)
x_attn = self.conv(x_attn)
return x * self.gate(x_attn)
class LightSpatialAttn(nn.Module):
"""An experimental 'lightweight' variant that sums avg_pool and max_pool results.
"""
def __init__(self, kernel_size=7, gate_layer='sigmoid'):
super(LightSpatialAttn, self).__init__()
self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True)
x_attn = self.conv(x_attn)
return x * self.gate(x_attn)
class CbamModule(nn.Module):
def __init__(
self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
super(CbamModule, self).__init__()
self.channel = ChannelAttn(
channels, rd_ratio=rd_ratio, rd_channels=rd_channels,
rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias)
self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer)
def forward(self, x):
x = self.channel(x)
x = self.spatial(x)
return x
class LightCbamModule(nn.Module):
def __init__(
self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
super(LightCbamModule, self).__init__()
self.channel = LightChannelAttn(
channels, rd_ratio=rd_ratio, rd_channels=rd_channels,
rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias)
        self.spatial = LightSpatialAttn(spatial_kernel_size, gate_layer=gate_layer)
def forward(self, x):
x = self.channel(x)
x = self.spatial(x)
return x
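# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# CbamModule applies channel attention followed by spatial attention and preserves the input
# shape; tensor sizes below are arbitrary.
if __name__ == '__main__':
    _feat = torch.randn(2, 64, 56, 56)   # (B, C, H, W) feature map
    _cbam = CbamModule(channels=64)      # channel attn then spatial attn
    _out = _cbam(_feat)
    assert _out.shape == _feat.shape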
| pytorch-image-models/timm/layers/cbam.py/0 | {
"file_path": "pytorch-image-models/timm/layers/cbam.py",
"repo_id": "pytorch-image-models",
"token_count": 2016
} | 177 |
from enum import Enum
from typing import Union
import torch
class Format(str, Enum):
NCHW = 'NCHW'
NHWC = 'NHWC'
NCL = 'NCL'
NLC = 'NLC'
FormatT = Union[str, Format]
def get_spatial_dim(fmt: FormatT):
fmt = Format(fmt)
if fmt is Format.NLC:
dim = (1,)
elif fmt is Format.NCL:
dim = (2,)
elif fmt is Format.NHWC:
dim = (1, 2)
else:
dim = (2, 3)
return dim
def get_channel_dim(fmt: FormatT):
fmt = Format(fmt)
if fmt is Format.NHWC:
dim = 3
elif fmt is Format.NLC:
dim = 2
else:
dim = 1
return dim
def nchw_to(x: torch.Tensor, fmt: Format):
if fmt == Format.NHWC:
x = x.permute(0, 2, 3, 1)
elif fmt == Format.NLC:
x = x.flatten(2).transpose(1, 2)
elif fmt == Format.NCL:
x = x.flatten(2)
return x
def nhwc_to(x: torch.Tensor, fmt: Format):
if fmt == Format.NCHW:
x = x.permute(0, 3, 1, 2)
elif fmt == Format.NLC:
x = x.flatten(1, 2)
elif fmt == Format.NCL:
x = x.flatten(1, 2).transpose(1, 2)
return x
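# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Convert an NCHW feature map to token (NLC) layout and query per-format dimensions.
if __name__ == '__main__':
    _x = torch.randn(2, 32, 7, 7)          # NCHW
    _tokens = nchw_to(_x, Format.NLC)      # -> (2, 49, 32)
    assert _tokens.shape == (2, 49, 32)
    assert get_channel_dim('NLC') == 2
    assert get_spatial_dim('NCHW') == (2, 3)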
| pytorch-image-models/timm/layers/format.py/0 | {
"file_path": "pytorch-image-models/timm/layers/format.py",
"repo_id": "pytorch-image-models",
"token_count": 572
} | 178 |
""" Normalization layers and wrappers
Norm layer definitions that support fast norm and consistent channel arg order (always first arg).
Hacked together by / Copyright 2022 Ross Wightman
"""
import numbers
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm
class GroupNorm(nn.GroupNorm):
def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True):
# NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN
super().__init__(num_groups, num_channels, eps=eps, affine=affine)
self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)
def forward(self, x):
if self.fast_norm:
return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
else:
return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
class GroupNorm1(nn.GroupNorm):
""" Group Normalization with 1 group.
Input: tensor in shape [B, C, *]
"""
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs)
self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.fast_norm:
return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
else:
return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
class LayerNorm(nn.LayerNorm):
""" LayerNorm w/ fast norm option
"""
def __init__(self, num_channels, eps=1e-6, affine=True):
super().__init__(num_channels, eps=eps, elementwise_affine=affine)
self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self._fast_norm:
x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
else:
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x
class LayerNorm2d(nn.LayerNorm):
""" LayerNorm for channels of '2D' spatial NCHW tensors """
def __init__(self, num_channels, eps=1e-6, affine=True):
super().__init__(num_channels, eps=eps, elementwise_affine=affine)
self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.permute(0, 2, 3, 1)
if self._fast_norm:
x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
else:
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
x = x.permute(0, 3, 1, 2)
return x
def _is_contiguous(tensor: torch.Tensor) -> bool:
# jit is oh so lovely :/
if torch.jit.is_scripting():
return tensor.is_contiguous()
else:
return tensor.is_contiguous(memory_format=torch.contiguous_format)
@torch.jit.script
def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float):
s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True)
x = (x - u) * torch.rsqrt(s + eps)
x = x * weight[:, None, None] + bias[:, None, None]
return x
def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float):
u = x.mean(dim=1, keepdim=True)
s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0)
x = (x - u) * torch.rsqrt(s + eps)
x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
return x
class LayerNormExp2d(nn.LayerNorm):
""" LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W).
    Experimental implementation w/ manual norm for non-contiguous tensors.
    This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last
    layout. However, benefits are not always clear and it can perform worse on other GPUs.
"""
def __init__(self, num_channels, eps=1e-6):
super().__init__(num_channels, eps=eps)
def forward(self, x) -> torch.Tensor:
if _is_contiguous(x):
x = F.layer_norm(
x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)
else:
x = _layer_norm_cf(x, self.weight, self.bias, self.eps)
return x
class RmsNorm(nn.Module):
""" RmsNorm w/ fast (apex) norm if available
"""
__constants__ = ['normalized_shape', 'eps', 'elementwise_affine']
normalized_shape: Tuple[int, ...]
eps: float
elementwise_affine: bool
def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
normalized_shape = channels
if isinstance(normalized_shape, numbers.Integral):
# mypy error: incompatible types in assignment
normalized_shape = (normalized_shape,) # type: ignore[assignment]
self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
self.eps = eps
self.elementwise_affine = affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.elementwise_affine:
nn.init.ones_(self.weight)
def forward(self, x: torch.Tensor) -> torch.Tensor:
        # NOTE fast norm fallback needs our rms norm impl, so both paths go through here.
        # Since there is no built-in PyTorch impl, always use APEX RmsNorm if it is installed.
x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps)
return x
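# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# LayerNorm2d normalizes over the channel dim of NCHW tensors; GroupNorm1 accepts any
# [B, C, *] shaped input. Both preserve the input shape.
if __name__ == '__main__':
    _x = torch.randn(2, 64, 14, 14)
    assert LayerNorm2d(64)(_x).shape == _x.shape
    assert GroupNorm1(64)(_x).shape == _x.shape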
| pytorch-image-models/timm/layers/norm.py/0 | {
"file_path": "pytorch-image-models/timm/layers/norm.py",
"repo_id": "pytorch-image-models",
"token_count": 2520
} | 179 |
""" Test Time Pooling (Average-Max Pool)
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from torch import nn
import torch.nn.functional as F
from .adaptive_avgmax_pool import adaptive_avgmax_pool2d
_logger = logging.getLogger(__name__)
class TestTimePoolHead(nn.Module):
def __init__(self, base, original_pool=7):
super(TestTimePoolHead, self).__init__()
self.base = base
self.original_pool = original_pool
base_fc = self.base.get_classifier()
if isinstance(base_fc, nn.Conv2d):
self.fc = base_fc
else:
self.fc = nn.Conv2d(
self.base.num_features, self.base.num_classes, kernel_size=1, bias=True)
self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size()))
self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size()))
self.base.reset_classifier(0) # delete original fc layer
def forward(self, x):
x = self.base.forward_features(x)
x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1)
x = self.fc(x)
x = adaptive_avgmax_pool2d(x, 1)
return x.view(x.size(0), -1)
def apply_test_time_pool(model, config, use_test_size=False):
test_time_pool = False
if not hasattr(model, 'default_cfg') or not model.default_cfg:
return model, False
if use_test_size and 'test_input_size' in model.default_cfg:
df_input_size = model.default_cfg['test_input_size']
else:
df_input_size = model.default_cfg['input_size']
if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]:
_logger.info('Target input size %s > pretrained default %s, using test time pooling' %
(str(config['input_size'][-2:]), str(df_input_size[-2:])))
model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size'])
test_time_pool = True
return model, test_time_pool
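# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# apply_test_time_pool only wraps the model when the requested input size exceeds the
# pretrained default; the model/config below are illustrative, not defined in this file.
#
#   model = timm.create_model('resnet50', pretrained=True)
#   config = {'input_size': (3, 288, 288)}                   # larger than the 224 default
#   model, using_ttp = apply_test_time_pool(model, config)   # using_ttp is True here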
| pytorch-image-models/timm/layers/test_time_pool.py/0 | {
"file_path": "pytorch-image-models/timm/layers/test_time_pool.py",
"repo_id": "pytorch-image-models",
"token_count": 881
} | 180 |
""" Model creation / weight loading / state_dict helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
from collections import OrderedDict
from typing import Any, Callable, Dict, Optional, Union
import torch
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
_logger = logging.getLogger(__name__)
__all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint']
def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]:
# 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training
cleaned_state_dict = {}
for k, v in state_dict.items():
name = k[7:] if k.startswith('module.') else k
cleaned_state_dict[name] = v
return cleaned_state_dict
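# e.g. (illustrative) clean_state_dict({'module.conv1.weight': w}) -> {'conv1.weight': w}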
def load_state_dict(
checkpoint_path: str,
use_ema: bool = True,
device: Union[str, torch.device] = 'cpu',
) -> Dict[str, Any]:
if checkpoint_path and os.path.isfile(checkpoint_path):
# Check if safetensors or not and load weights accordingly
if str(checkpoint_path).endswith(".safetensors"):
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
checkpoint = safetensors.torch.load_file(checkpoint_path, device=device)
else:
checkpoint = torch.load(checkpoint_path, map_location=device)
state_dict_key = ''
if isinstance(checkpoint, dict):
if use_ema and checkpoint.get('state_dict_ema', None) is not None:
state_dict_key = 'state_dict_ema'
elif use_ema and checkpoint.get('model_ema', None) is not None:
state_dict_key = 'model_ema'
elif 'state_dict' in checkpoint:
state_dict_key = 'state_dict'
elif 'model' in checkpoint:
state_dict_key = 'model'
state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint)
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(
model: torch.nn.Module,
checkpoint_path: str,
use_ema: bool = True,
device: Union[str, torch.device] = 'cpu',
strict: bool = True,
remap: bool = False,
filter_fn: Optional[Callable] = None,
):
if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'):
# numpy checkpoint, try to load via model specific load_pretrained fn
if hasattr(model, 'load_pretrained'):
model.load_pretrained(checkpoint_path)
else:
raise NotImplementedError('Model cannot load numpy checkpoint')
return
state_dict = load_state_dict(checkpoint_path, use_ema, device=device)
if remap:
state_dict = remap_state_dict(state_dict, model)
elif filter_fn:
state_dict = filter_fn(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def remap_state_dict(
state_dict: Dict[str, Any],
model: torch.nn.Module,
allow_reshape: bool = True
):
""" remap checkpoint by iterating over state dicts in order (ignoring original keys).
This assumes models (and originating state dict) were created with params registered in same order.
"""
out_dict = {}
for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()):
assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.'
if va.shape != vb.shape:
if allow_reshape:
vb = vb.reshape(va.shape)
else:
assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.'
out_dict[ka] = vb
return out_dict
def resume_checkpoint(
model: torch.nn.Module,
checkpoint_path: str,
optimizer: torch.optim.Optimizer = None,
loss_scaler: Any = None,
log_info: bool = True,
):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
if log_info:
_logger.info('Restoring model state from checkpoint...')
state_dict = clean_state_dict(checkpoint['state_dict'])
model.load_state_dict(state_dict)
if optimizer is not None and 'optimizer' in checkpoint:
if log_info:
_logger.info('Restoring optimizer state from checkpoint...')
optimizer.load_state_dict(checkpoint['optimizer'])
if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
if log_info:
_logger.info('Restoring AMP loss scaler state from checkpoint...')
loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch']
if 'version' in checkpoint and checkpoint['version'] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
if log_info:
_logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
return resume_epoch
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
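# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Typical use: load fine-tuned weights into a freshly created model (strict=False tolerates
# a changed head), or resume full training state. Paths and names below are placeholders.
#
#   model = timm.create_model('resnet50', num_classes=10)
#   load_checkpoint(model, 'output/train/model_best.pth.tar', strict=False)
#   start_epoch = resume_checkpoint(model, 'output/train/last.pth.tar', optimizer=optimizer)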
| pytorch-image-models/timm/models/_helpers.py/0 | {
"file_path": "pytorch-image-models/timm/models/_helpers.py",
"repo_id": "pytorch-image-models",
"token_count": 2546
} | 181 |
""" ConViT Model
@article{d2021convit,
title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},
author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent},
journal={arXiv preprint arXiv:2103.10697},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.10697
Original code: https://github.com/facebookresearch/convit, original copyright below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs
from .vision_transformer_hybrid import HybridEmbed
__all__ = ['ConVit']
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class GPSA(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
locality_strength=1.,
):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.locality_strength = locality_strength
self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.gating_param = nn.Parameter(torch.ones(self.num_heads))
self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None
def forward(self, x):
B, N, C = x.shape
if self.rel_indices is None or self.rel_indices.shape[1] != N:
self.rel_indices = self.get_rel_indices(N)
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def get_attention(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k = qk[0], qk[1]
pos_score = self.rel_indices.expand(B, -1, -1, -1)
pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1, -1, 1, 1)
attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn /= attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map=False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:, :, -1] ** .5
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self):
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 # max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads ** .5)
center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1 + kernel_size * h2
self.pos_proj.weight.data[position, 2] = -1
self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
self.pos_proj.weight.data *= self.locality_strength
def get_rel_indices(self, num_patches: int) -> torch.Tensor:
img_size = int(num_patches ** .5)
rel_indices = torch.zeros(1, num_patches, num_patches, 3)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
rel_indices[:, :, :, 2] = indd.unsqueeze(0)
rel_indices[:, :, :, 1] = indy.unsqueeze(0)
rel_indices[:, :, :, 0] = indx.unsqueeze(0)
device = self.qk.weight.device
return rel_indices.to(device)
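# NOTE (added for clarity): for a 14x14 patch grid, GPSA.get_rel_indices(196) returns a
# (1, 196, 196, 3) tensor holding (dx, dy, dx**2 + dy**2) for every pair of patches, which
# pos_proj maps to per-head positional attention scores in get_attention().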
class MHSA(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def get_attention_map(self, x, return_map=False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0)
img_size = int(N ** .5)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
distances = indd ** .5
distances = distances.to(x.device)
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N
if return_map:
return dist, attn_map
else:
return dist
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm,
use_gpsa=True,
locality_strength=1.,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.use_gpsa = use_gpsa
if self.use_gpsa:
self.attn = GPSA(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
locality_strength=locality_strength,
)
else:
self.attn = MHSA(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConVit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
hybrid_backbone=None,
norm_layer=LayerNorm,
local_up_to_layer=3,
locality_strength=1.,
use_pos_embed=True,
):
super().__init__()
assert global_pool in ('', 'avg', 'token')
embed_dim *= num_heads
self.num_classes = num_classes
self.global_pool = global_pool
self.local_up_to_layer = local_up_to_layer
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.num_patches = num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
use_gpsa=i < local_up_to_layer,
locality_strength=locality_strength,
) for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
for n, m in self.named_modules():
if hasattr(m, 'local_init'):
m.local_init()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.use_pos_embed:
x = x + self.pos_embed
x = self.pos_drop(x)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
for u, blk in enumerate(self.blocks):
if u == self.local_up_to_layer:
x = torch.cat((cls_tokens, x), dim=1)
x = blk(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_convit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
return build_model_with_cfg(ConVit, variant, pretrained, **kwargs)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# ConViT
'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'),
'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'),
'convit_base.fb_in1k': _cfg(hf_hub_id='timm/')
})
@register_model
def convit_tiny(pretrained=False, **kwargs) -> ConVit:
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4)
model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convit_small(pretrained=False, **kwargs) -> ConVit:
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9)
model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convit_base(pretrained=False, **kwargs) -> ConVit:
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16)
model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
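# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Note that ConVit multiplies `embed_dim` by `num_heads`, so convit_tiny above ends up with
# a 48 * 4 = 192 dim embedding. The registered variants are created via the timm factory:
#
#   model = timm.create_model('convit_tiny', pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))   # -> shape (1, 10)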
| pytorch-image-models/timm/models/convit.py/0 | {
"file_path": "pytorch-image-models/timm/models/convit.py",
"repo_id": "pytorch-image-models",
"token_count": 7716
} | 182 |
""" EVA
EVA from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636
@article{EVA,
title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale},
author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang,
Tiejun and Wang, Xinlong and Cao, Yue},
journal={arXiv preprint arXiv:2211.07636},
year={2022}
}
EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331
@article{EVA02,
title={EVA-02: A Visual Representation for Neon Genesis},
author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue},
journal={arXiv preprint arXiv:2303.11331},
year={2023}
}
This file contains EVA & EVA02 model implementations evolved from BEiT; additional models are in vision_transformer.py.
Modifications by / Copyright 2023 Ross Wightman, original copyrights below
"""
# EVA models Copyright (c) 2022 BAAI-Vision
# EVA02 models Copyright (c) 2023 BAAI-Vision
import math
from typing import Callable, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, \
apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, \
to_2tuple, use_fused_attn
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model
__all__ = ['Eva']
class EvaAttention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
qkv_fused: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
attn_head_dim: Optional[int] = None,
norm_layer: Optional[Callable] = None,
):
"""
Args:
dim:
num_heads:
qkv_bias:
qkv_fused:
attn_drop:
proj_drop:
attn_head_dim:
norm_layer:
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
if qkv_fused:
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
self.q_proj = self.k_proj = self.v_proj = None
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = self.k_bias = self.v_bias = None
else:
self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias)
self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias)
self.qkv = None
self.q_bias = self.k_bias = self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity()
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(
self,
x,
rope: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
):
B, N, C = x.shape
if self.qkv is not None:
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim
else:
q = self.q_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C
k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2)
v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2)
if rope is not None:
q = torch.cat([q[:, :, :1, :], apply_rot_embed_cat(q[:, :, 1:, :], rope)], 2).type_as(v)
k = torch.cat([k[:, :, :1, :], apply_rot_embed_cat(k[:, :, 1:, :], rope)], 2).type_as(v)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
if attn_mask is not None:
attn_mask = attn_mask.to(torch.bool)
attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.norm(x)
x = self.proj(x)
x = self.proj_drop(x)
return x
class EvaBlock(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = True,
qkv_fused: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
scale_attn_inner: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
attn_head_dim: Optional[int] = None,
):
"""
Args:
dim:
num_heads:
qkv_bias:
qkv_fused:
mlp_ratio:
swiglu_mlp:
scale_mlp:
scale_attn_inner:
proj_drop:
attn_drop:
drop_path:
init_values:
act_layer:
norm_layer:
attn_head_dim:
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = EvaAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qkv_fused=qkv_fused,
attn_drop=attn_drop,
proj_drop=proj_drop,
attn_head_dim=attn_head_dim,
norm_layer=norm_layer if scale_attn_inner else None,
)
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
hidden_features = int(dim * mlp_ratio)
if swiglu_mlp:
if scale_mlp:
                # when norm is used in SwiGLU, an impl with separate fc for gate & x is used
self.mlp = SwiGLU(
in_features=dim,
hidden_features=hidden_features,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
# w/o any extra norm, an impl with packed weights is used, matches existing GluMLP
self.mlp = GluMlp(
in_features=dim,
hidden_features=hidden_features * 2,
norm_layer=norm_layer if scale_mlp else None,
act_layer=nn.SiLU,
gate_last=False,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=hidden_features,
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None):
if self.gamma_1 is None:
x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask))
x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class EvaBlockPostNorm(nn.Module):
""" EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE. """
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = True,
qkv_fused: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
scale_attn_inner: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None, # ignore for post-norm
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
attn_head_dim: Optional[int] = None,
):
"""
Args:
dim:
num_heads:
qkv_bias:
qkv_fused:
mlp_ratio:
swiglu_mlp:
scale_mlp:
scale_attn_inner:
proj_drop:
attn_drop:
drop_path:
init_values:
act_layer:
norm_layer:
attn_head_dim:
"""
super().__init__()
self.attn = EvaAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qkv_fused=qkv_fused,
attn_drop=attn_drop,
proj_drop=proj_drop,
attn_head_dim=attn_head_dim,
norm_layer=norm_layer if scale_attn_inner else None,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
hidden_features = int(dim * mlp_ratio)
if swiglu_mlp:
if scale_mlp:
                # when norm is used in SwiGLU, an impl with separate fc for gate & x is used
self.mlp = SwiGLU(
in_features=dim,
hidden_features=hidden_features,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
# w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP
self.mlp = GluMlp(
in_features=dim,
hidden_features=hidden_features * 2,
norm_layer=norm_layer if scale_mlp else None,
act_layer=nn.SiLU,
gate_last=False,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=hidden_features,
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class Eva(nn.Module):
""" Eva Vision Transformer w/ Abs & Rotary Pos Embed
This class implements the EVA and EVA02 models that were based on the BEiT ViT variant
* EVA - abs pos embed, global avg pool
* EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer)
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
qkv_bias: bool = True,
qkv_fused: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
scale_attn_inner: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
norm_layer: Callable = LayerNorm,
init_values: Optional[float] = None,
class_token: bool = True,
use_abs_pos_emb: bool = True,
use_rot_pos_emb: bool = False,
use_post_norm: bool = False,
dynamic_img_size: bool = False,
dynamic_img_pad: bool = False,
ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None,
head_init_scale: float = 0.001,
):
"""
Args:
img_size:
patch_size:
in_chans:
num_classes:
global_pool:
embed_dim:
depth:
num_heads:
qkv_bias:
qkv_fused:
mlp_ratio:
swiglu_mlp:
scale_mlp:
scale_attn_inner:
drop_rate:
pos_drop_rate:
proj_drop_rate:
attn_drop_rate:
drop_path_rate:
norm_layer:
init_values:
class_token:
use_abs_pos_emb:
use_rot_pos_emb:
use_post_norm:
ref_feat_shape:
head_init_scale:
"""
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.dynamic_img_size = dynamic_img_size
self.grad_checkpointing = False
embed_args = {}
if dynamic_img_size:
# flatten deferred until after pos embed
embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
dynamic_img_pad=dynamic_img_pad,
**embed_args,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=self.num_prefix_tokens,
return_indices=True,
)
else:
self.patch_drop = None
if use_rot_pos_emb:
ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None
self.rope = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=None if dynamic_img_size else self.patch_embed.grid_size,
ref_feat_shape=ref_feat_shape,
)
else:
self.rope = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock
self.blocks = nn.ModuleList([
block_fn(
dim=embed_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qkv_fused=qkv_fused,
mlp_ratio=mlp_ratio,
swiglu_mlp=swiglu_mlp,
scale_mlp=scale_mlp,
scale_attn_inner=scale_attn_inner,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
)
for i in range(depth)])
use_fc_norm = self.global_pool == 'avg'
self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
self.fix_init_weight()
if isinstance(self.head, nn.Linear):
trunc_normal_(self.head.weight, std=.02)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.zeros_(m.bias)
@torch.jit.ignore
def no_weight_decay(self):
nwd = {'pos_embed', 'cls_token'}
return nwd
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
)
return matcher
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
if self.dynamic_img_size:
B, H, W, C = x.shape
if self.pos_embed is not None:
pos_embed = resample_abs_pos_embed(
self.pos_embed,
(H, W),
num_prefix_tokens=self.num_prefix_tokens,
)
else:
pos_embed = None
x = x.view(B, -1, C)
rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None
else:
pos_embed = self.pos_embed
rot_pos_embed = self.rope.get_embed() if self.rope is not None else None
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if pos_embed is not None:
x = x + pos_embed
x = self.pos_drop(x)
# obtain shared rotary position embedding and apply patch dropout
if self.patch_drop is not None:
x, keep_indices = self.patch_drop(x)
if rot_pos_embed is not None and keep_indices is not None:
rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices)
return x, rot_pos_embed
def forward_features(self, x):
x = self.patch_embed(x)
x, rot_pos_embed = self._pos_embed(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, rope=rot_pos_embed)
else:
x = blk(x, rope=rot_pos_embed)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(
state_dict,
model,
interpolation='bicubic',
antialias=True,
):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
state_dict = state_dict.get('model_ema', state_dict)
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('module', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
# prefix for loading OpenCLIP compatible weights
if 'visual.trunk.pos_embed' in state_dict:
prefix = 'visual.trunk.'
elif 'visual.pos_embed' in state_dict:
prefix = 'visual.'
else:
prefix = ''
mim_weights = prefix + 'mask_token' in state_dict
no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict
len_prefix = len(prefix)
for k, v in state_dict.items():
if prefix:
if k.startswith(prefix):
k = k[len_prefix:]
else:
continue
if 'rope' in k:
# fixed embedding no need to load buffer from checkpoint
continue
if 'patch_embed.proj.weight' in k:
_, _, H, W = model.patch_embed.proj.weight.shape
if v.shape[-1] != W or v.shape[-2] != H:
v = resample_patch_embed(
v,
(H, W),
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1)
v = resample_abs_pos_embed(
v,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
k = k.replace('mlp.ffn_ln', 'mlp.norm')
k = k.replace('attn.inner_attn_ln', 'attn.norm')
k = k.replace('mlp.w12', 'mlp.fc1')
k = k.replace('mlp.w1', 'mlp.fc1_g')
k = k.replace('mlp.w2', 'mlp.fc1_x')
k = k.replace('mlp.w3', 'mlp.fc2')
if no_qkv:
k = k.replace('q_bias', 'q_proj.bias')
k = k.replace('v_bias', 'v_proj.bias')
if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'):
if k == 'norm.weight' or k == 'norm.bias':
# try moving norm -> fc norm on fine-tune, probably a better starting point than new init
k = k.replace('norm', 'fc_norm')
else:
# skip pretrain mask token & head weights
continue
out_dict[k] = v
return out_dict
def _create_eva(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Eva models.')
model = build_model_with_cfg(
Eva, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
# EVA 01 CLIP fine-tuned on imagenet-1k
'eva_giant_patch14_224.clip_ft_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt',
hf_hub_id='timm/',
),
'eva_giant_patch14_336.clip_ft_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt',
hf_hub_id='timm/',
input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
# MIM EVA 01 pretrain, ft on in22k -> in1k
'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg(
# hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'),
# in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune
'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg(
# hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash',
),
'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg(
# hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash',
),
'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg(
hf_hub_id='timm/',
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash',
),
# in22k or m3m MIM pretrain w/ in1k fine-tune
'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 336, 336), crop_pct=1.0,
),
'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 336, 336), crop_pct=1.0,
),
'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0,
),
'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0,
),
'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0,
),
# in22k or m3m MIM pretrain w/ in22k fine-tune
'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841,
),
'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841,
),
'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt',
hf_hub_id='timm/',
input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841,
),
# in22k or m38m MIM pretrain
'eva02_tiny_patch14_224.mim_in22k': _cfg(
# hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_small_patch14_224.mim_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_base_patch14_224.mim_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_large_patch14_224.mim_in22k': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
'eva02_large_patch14_224.mim_m38m': _cfg(
#hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt',
hf_hub_id='timm/',
num_classes=0,
),
# EVA01 and EVA02 CLIP image towers
'eva_giant_patch14_clip_224.laion400m': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt',
hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva_giant_patch14_clip_224.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt',
hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva02_base_patch16_clip_224.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt',
hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=512,
),
'eva02_large_patch14_clip_224.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt',
hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=768,
),
'eva02_large_patch14_clip_336.merged2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt',
hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
input_size=(3, 336, 336), crop_pct=1.0,
num_classes=768,
),
'eva02_enormous_patch14_clip_224.laion2b': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt',
hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt',
hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights
hf_hub_filename='open_clip_pytorch_model.bin',
num_classes=1024,
),
'eva02_enormous_patch14_clip_224.pretrain': _cfg(
# hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_E_psz14.pt',
num_classes=0,
),
})
@register_model
def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva:
""" EVA-g model https://arxiv.org/abs/2211.07636 """
model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408)
model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva:
""" EVA-g model https://arxiv.org/abs/2211.07636 """
model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408)
model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva:
""" EVA-g model https://arxiv.org/abs/2211.07636 """
model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408)
model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=192,
depth=12,
num_heads=3,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=768,
depth=12,
num_heads=12,
qkv_fused=False,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=336,
patch_size=14,
embed_dim=192,
depth=12,
num_heads=3,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=336,
patch_size=14,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=448,
patch_size=14,
embed_dim=768,
depth=12,
num_heads=12,
qkv_fused=False,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva:
model_args = dict(
img_size=448,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
)
model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva:
""" EVA-g CLIP model (only difference from non-CLIP is the pooling) """
model_args = dict(
patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408,
global_pool=kwargs.pop('global_pool', 'token'))
model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_base_patch16_clip_224(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_base """
model_args = dict(
img_size=224,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
qkv_fused=False,
mlp_ratio=4 * 2 / 3,
swiglu_mlp=True,
scale_mlp=True,
scale_attn_inner=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
scale_attn_inner=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """
model_args = dict(
img_size=336,
patch_size=14,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4 * 2 / 3,
qkv_fused=False,
swiglu_mlp=True,
scale_mlp=True,
scale_attn_inner=True,
use_rot_pos_emb=True,
ref_feat_shape=(16, 16), # 224/14
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva:
""" A EVA-CLIP specific variant that uses residual post-norm in blocks """
model_args = dict(
img_size=224,
patch_size=14,
embed_dim=1792,
depth=64,
num_heads=16,
mlp_ratio=15360 / 1792,
use_post_norm=True,
global_pool=kwargs.pop('global_pool', 'token'),
)
model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
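# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of instantiating one of the EVA variants registered
# above through timm's model factory. It assumes timm and torch are installed;
# pretrained=False avoids downloading weights.
if __name__ == '__main__':
    import timm
    import torch

    model = timm.create_model('eva02_tiny_patch14_224', pretrained=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 1000]) with the default classifier head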
| pytorch-image-models/timm/models/eva.py/0 | {
"file_path": "pytorch-image-models/timm/models/eva.py",
"repo_id": "pytorch-image-models",
"token_count": 21637
} | 183 |
""" Pytorch Inception-V4 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['InceptionV4']
class Mixed3a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = conv_block(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed4a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed4a, self).__init__()
self.branch0 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(64, 96, kernel_size=(3, 3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed5a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed5a, self).__init__()
self.conv = conv_block(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class InceptionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionA, self).__init__()
self.branch0 = conv_block(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1),
conv_block(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionA, self).__init__()
self.branch0 = conv_block(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
conv_block(384, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=3, stride=1, padding=1),
conv_block(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionB, self).__init__()
self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.branch2 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionB, self).__init__()
self.branch0 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
conv_block(1024, 256, kernel_size=1, stride=1),
conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionC(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionC, self).__init__()
self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV4, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = 1536
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
features = [
conv_block(in_chans, 32, kernel_size=3, stride=2),
conv_block(32, 32, kernel_size=3, stride=1),
conv_block(32, 64, kernel_size=3, stride=1, padding=1),
Mixed3a(conv_block),
Mixed4a(conv_block),
Mixed5a(conv_block),
]
features += [InceptionA(conv_block) for _ in range(4)]
features += [ReductionA(conv_block)] # Mixed6a
features += [InceptionB(conv_block) for _ in range(7)]
features += [ReductionB(conv_block)] # Mixed7a
features += [InceptionC(conv_block) for _ in range(3)]
self.features = nn.Sequential(*features)
self.feature_info = [
dict(num_chs=64, reduction=2, module='features.2'),
dict(num_chs=160, reduction=4, module='features.3'),
dict(num_chs=384, reduction=8, module='features.9'),
dict(num_chs=1024, reduction=16, module='features.17'),
dict(num_chs=1536, reduction=32, module='features.21'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^features\.[012]\.',
blocks=r'^features\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4:
return build_model_with_cfg(
InceptionV4,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
default_cfgs = generate_default_cfgs({
'inception_v4.tf_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'features.0.conv', 'classifier': 'last_linear',
}
})
@register_model
def inception_v4(pretrained=False, **kwargs):
return _create_inception_v4('inception_v4', pretrained, **kwargs)
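# --- Usage sketch (not part of the original module) ---
# A hedged example of building the registered model and running a forward pass.
# It assumes timm and torch are installed; the 299x299 input matches the default
# cfg defined above.
if __name__ == '__main__':
    import timm
    import torch

    model = timm.create_model('inception_v4', pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 299, 299))
    print(logits.shape)  # expected: torch.Size([1, 1000])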
| pytorch-image-models/timm/models/inception_v4.py/0 | {
"file_path": "pytorch-image-models/timm/models/inception_v4.py",
"repo_id": "pytorch-image-models",
"token_count": 5528
} | 184 |
""" TinyViT
Paper: `TinyViT: Fast Pretraining Distillation for Small Vision Transformers`
- https://arxiv.org/abs/2207.10666
Adapted from official impl at https://github.com/microsoft/Cream/tree/main/TinyViT
"""
__all__ = ['TinyVit']
import math
import itertools
from functools import partial
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath,\
trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(torch.nn.Sequential):
def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self.conv, self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Conv2d(
w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchEmbed(nn.Module):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.stride = 4
self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1)
self.act = act_layer()
self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1)
def forward(self, x):
x = self.conv1(x)
x = self.act(x)
x = self.conv2(x)
return x
class MBConv(nn.Module):
def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path):
super().__init__()
mid_chs = int(in_chs * expand_ratio)
self.conv1 = ConvNorm(in_chs, mid_chs, ks=1)
self.act1 = act_layer()
self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs)
self.act2 = act_layer()
self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0)
self.act3 = act_layer()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop_path(x)
x += shortcut
x = self.act3(x)
return x
class PatchMerging(nn.Module):
def __init__(self, dim, out_dim, act_layer):
super().__init__()
self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0)
self.act1 = act_layer()
self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim)
self.act2 = act_layer()
self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0)
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
return x
class ConvLayer(nn.Module):
def __init__(
self,
dim,
depth,
act_layer,
drop_path=0.,
conv_expand_ratio=4.,
):
super().__init__()
self.dim = dim
self.depth = depth
self.blocks = nn.Sequential(*[
MBConv(
dim, dim, conv_expand_ratio, act_layer,
drop_path[i] if isinstance(drop_path, list) else drop_path,
)
for i in range(depth)
])
def forward(self, x):
x = self.blocks(x)
return x
class NormMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
drop=0.,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.norm = norm_layer(in_features)
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop)
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop)
def forward(self, x):
x = self.norm(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Attention(torch.nn.Module):
fused_attn: torch.jit.Final[bool]
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=(14, 14),
):
super().__init__()
assert isinstance(resolution, tuple) and len(resolution) == 2
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.val_dim = int(attn_ratio * key_dim)
self.out_dim = self.val_dim * num_heads
self.attn_ratio = attn_ratio
self.resolution = resolution
self.fused_attn = use_fused_attn()
self.norm = nn.LayerNorm(dim)
self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim))
self.proj = nn.Linear(self.out_dim, dim)
points = list(itertools.product(range(resolution[0]), range(resolution[1])))
N = len(points)
attention_offsets = {}
idxs = []
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
attn_bias = self.get_attention_biases(x.device)
B, N, _ = x.shape
# Normalization
x = self.norm(x)
qkv = self.qkv(x)
# (B, N, num_heads, d)
q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3)
# (B, num_heads, N, d)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn + attn_bias
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, self.out_dim)
x = self.proj(x)
return x
class TinyVitBlock(nn.Module):
""" TinyViT Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
local_conv_size (int): the kernel size of the convolution between
Attention and MLP. Default: 3
act_layer: the activation function. Default: nn.GELU
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
local_conv_size=3,
act_layer=nn.GELU
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
assert window_size > 0, 'window_size must be greater than 0'
self.window_size = window_size
self.mlp_ratio = mlp_ratio
assert dim % num_heads == 0, 'dim must be divisible by num_heads'
head_dim = dim // num_heads
window_resolution = (window_size, window_size)
self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = NormMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
pad = local_conv_size // 2
self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
def forward(self, x):
B, H, W, C = x.shape
L = H * W
shortcut = x
if H == self.window_size and W == self.window_size:
x = x.reshape(B, L, C)
x = self.attn(x)
x = x.view(B, H, W, C)
else:
pad_b = (self.window_size - H % self.window_size) % self.window_size
pad_r = (self.window_size - W % self.window_size) % self.window_size
padding = pad_b > 0 or pad_r > 0
if padding:
x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
# window partition
pH, pW = H + pad_b, W + pad_r
nH = pH // self.window_size
nW = pW // self.window_size
x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
B * nH * nW, self.window_size * self.window_size, C
)
x = self.attn(x)
# window reverse
x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
if padding:
x = x[:, :H, :W].contiguous()
x = shortcut + self.drop_path1(x)
x = x.permute(0, 3, 1, 2)
x = self.local_conv(x)
x = x.reshape(B, C, L).transpose(1, 2)
x = x + self.drop_path2(self.mlp(x))
return x.view(B, H, W, C)
def extra_repr(self) -> str:
return f"dim={self.dim}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
register_notrace_module(TinyVitBlock)
class TinyVitStage(nn.Module):
""" A basic TinyViT layer for one stage.
Args:
dim (int): Number of input channels.
out_dim: the output dimension of the layer
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
act_layer: the activation function. Default: nn.GELU
"""
def __init__(
self,
dim,
out_dim,
depth,
num_heads,
window_size,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
downsample=None,
local_conv_size=3,
act_layer=nn.GELU,
):
super().__init__()
self.depth = depth
self.out_dim = out_dim
# patch merging layer
if downsample is not None:
self.downsample = downsample(
dim=dim,
out_dim=out_dim,
act_layer=act_layer,
)
else:
self.downsample = nn.Identity()
assert dim == out_dim
# build blocks
self.blocks = nn.Sequential(*[
TinyVitBlock(
dim=out_dim,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
local_conv_size=local_conv_size,
act_layer=act_layer,
)
for i in range(depth)])
def forward(self, x):
x = self.downsample(x)
x = x.permute(0, 2, 3, 1) # BCHW -> BHWC
x = self.blocks(x)
x = x.permute(0, 3, 1, 2) # BHWC -> BCHW
return x
def extra_repr(self) -> str:
return f"dim={self.out_dim}, depth={self.depth}"
class TinyVit(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dims=(96, 192, 384, 768),
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
window_sizes=(7, 7, 14, 7),
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
act_layer=nn.GELU,
):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.num_stages = len(depths)
self.mlp_ratio = mlp_ratio
self.grad_checkpointing = use_checkpoint
self.patch_embed = PatchEmbed(
in_chs=in_chans,
out_chs=embed_dims[0],
act_layer=act_layer,
)
# stochastic depth rate rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# build stages
self.stages = nn.Sequential()
stride = self.patch_embed.stride
prev_dim = embed_dims[0]
self.feature_info = []
for stage_idx in range(self.num_stages):
if stage_idx == 0:
stage = ConvLayer(
dim=prev_dim,
depth=depths[stage_idx],
act_layer=act_layer,
drop_path=dpr[:depths[stage_idx]],
conv_expand_ratio=mbconv_expand_ratio,
)
else:
out_dim = embed_dims[stage_idx]
drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])]
stage = TinyVitStage(
dim=embed_dims[stage_idx - 1],
out_dim=out_dim,
depth=depths[stage_idx],
num_heads=num_heads[stage_idx],
window_size=window_sizes[stage_idx],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
local_conv_size=local_conv_size,
drop_path=drop_path_rate,
downsample=PatchMerging,
act_layer=act_layer,
)
prev_dim = out_dim
stride *= 2
self.stages.append(stage)
self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')]
# Classifier head
self.num_features = embed_dims[-1]
norm_layer_cf = partial(LayerNorm2d, eps=1e-5)
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
norm_layer=norm_layer_cf,
)
# init weights
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'attention_biases'}
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x):
x = self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict.keys():
state_dict = state_dict['model']
target_sd = model.state_dict()
out_dict = {}
for k, v in state_dict.items():
if k.endswith('attention_bias_idxs'):
continue
if 'attention_biases' in k:
# TODO: whether move this func into model for dynamic input resolution? (high risk)
v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv1.conv',
'classifier': 'head.fc',
'pool_size': (7, 7),
'input_size': (3, 224, 224),
'crop_pct': 0.95,
**kwargs,
}
default_cfgs = generate_default_cfgs({
'tiny_vit_5m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth'
),
'tiny_vit_5m_224.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth'
),
'tiny_vit_11m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth'
),
'tiny_vit_11m_224.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth'
),
'tiny_vit_21m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth'
),
'tiny_vit_21m_224.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth'
),
'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash',
),
})
def _create_tiny_vit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
TinyVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs
)
return model
@register_model
def tiny_vit_5m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[64, 128, 160, 320],
depths=[2, 2, 6, 2],
num_heads=[2, 4, 5, 10],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.0,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_11m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[64, 128, 256, 448],
depths=[2, 2, 6, 2],
num_heads=[2, 4, 8, 14],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.2,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_384(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[12, 12, 24, 12],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_512(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[16, 16, 32, 16],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs)
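# --- Usage sketch (not part of the original module) ---
# A hedged example of running a registered TinyViT variant for classification
# and pulling the final feature map via forward_features. It assumes timm and
# torch are installed; pretrained=False avoids downloading weights.
if __name__ == '__main__':
    import timm
    import torch

    model = timm.create_model('tiny_vit_5m_224', pretrained=False).eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)                  # expected: torch.Size([1, 1000])
        feats = model.forward_features(x)  # final stage feature map, BCHW
    print(logits.shape, feats.shape)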
| pytorch-image-models/timm/models/tiny_vit.py/0 | {
"file_path": "pytorch-image-models/timm/models/tiny_vit.py",
"repo_id": "pytorch-image-models",
"token_count": 12415
} | 185 |
import math
import torch
from torch.optim.optimizer import Optimizer
class AdaBelief(Optimizer):
r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-16)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
decoupled_decay (boolean, optional): (default: True) If set as True, then
the optimizer uses decoupled weight decay as in AdamW
fixed_decay (boolean, optional): (default: False) This is used when weight_decouple
is set as True.
When fixed_decay == True, the weight decay is performed as
$W_{new} = W_{old} - W_{old} \times decay$.
When fixed_decay == False, the weight decay is performed as
$W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the
weight decay ratio decreases with learning rate (lr).
rectify (boolean, optional): (default: True) If set as True, then perform the rectified
update similar to RAdam
degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update
when variance of gradient is high
reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020
For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer'
For example train/args for EfficientNet see these gists
- link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037
- link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3
"""
def __init__(
self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False,
decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad,
degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify,
fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)])
super(AdaBelief, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaBelief, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
amsgrad = group['amsgrad']
# State initialization
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
'AdaBelief does not support sparse gradients, please consider SparseAdam instead')
p_fp32 = p
if p.dtype in {torch.float16, torch.bfloat16}:
p_fp32 = p_fp32.float()
amsgrad = group['amsgrad']
beta1, beta2 = group['betas']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p_fp32)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(p_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(p_fp32)
# perform weight decay, check if decoupled weight decay
if group['decoupled_decay']:
if not group['fixed_decay']:
p_fp32.mul_(1.0 - group['lr'] * group['weight_decay'])
else:
p_fp32.mul_(1.0 - group['weight_decay'])
else:
if group['weight_decay'] != 0:
grad.add_(p_fp32, alpha=group['weight_decay'])
# get current state variable
exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Update first and second moment running average
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
grad_residual = grad - exp_avg
exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2)
if amsgrad:
max_exp_avg_var = state['max_exp_avg_var']
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
# update
if not group['rectify']:
# Default update
step_size = group['lr'] / bias_correction1
p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
# Rectified update, forked from RAdam
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
num_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
num_sma_max = 2 / (1 - beta2) - 1
num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = num_sma
# more conservative since it's an approximated value
if num_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t) *
(num_sma - 4) / (num_sma_max - 4) *
(num_sma - 2) / num_sma *
num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
elif group['degenerated_to_sgd']:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
if num_sma >= 5:
denom = exp_avg_var.sqrt().add_(group['eps'])
p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
elif step_size > 0:
p_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
if p.dtype in {torch.float16, torch.bfloat16}:
p.copy_(p_fp32)
return loss
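# --- Usage sketch (not part of the original module) ---
# A hedged, minimal example of driving AdaBelief on a toy regression problem.
# The hyper-parameters are illustrative only; see the docstring above for links
# to recommended settings per task.
if __name__ == '__main__':
    import torch.nn as nn
    import torch.nn.functional as F

    model = nn.Linear(10, 1)
    optimizer = AdaBelief(model.parameters(), lr=1e-3, eps=1e-16, weight_decay=1e-2)
    x, y = torch.randn(64, 10), torch.randn(64, 1)
    for _ in range(5):
        optimizer.zero_grad()
        loss = F.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
    print(loss.item())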
| pytorch-image-models/timm/optim/adabelief.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adabelief.py",
"repo_id": "pytorch-image-models",
"token_count": 5074
} | 186 |
""" RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
Modifications Copyright 2021 Ross Wightman
"""
import torch
from torch.optim import Optimizer
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing (decay) constant (default: 0.9)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
update as per defaults in Tensorflow
"""
def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
decoupled_decay=False, lr_in_momentum=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(
lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
if group['decoupled_decay']:
p.mul_(1. - group['lr'] * group['weight_decay'])
else:
grad = grad.add(p, alpha=group['weight_decay'])
# Tensorflow order of ops for updating squared avg
square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
# square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt
# grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
# Tensorflow accumulates the LR scaling in the momentum buffer
if group['lr_in_momentum']:
buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr'])
p.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.add_(buf, alpha=-group['lr'])
else:
p.addcdiv_(grad, avg, value=-group['lr'])
return loss
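# --- Usage sketch (not part of the original module) ---
# A hedged example of a few optimization steps with the TF-style RMSprop above.
# The hyper-parameters mirror the class defaults (alpha=0.9, eps=1e-10) and are
# illustrative rather than a recommendation.
if __name__ == '__main__':
    import torch.nn as nn
    import torch.nn.functional as F

    model = nn.Linear(10, 2)
    optimizer = RMSpropTF(model.parameters(), lr=1e-2, alpha=0.9, eps=1e-10, momentum=0.9)
    x, y = torch.randn(32, 10), torch.randint(0, 2, (32,))
    for _ in range(5):
        optimizer.zero_grad()
        loss = F.cross_entropy(model(x), y)
        loss.backward()
        optimizer.step()
    print(loss.item())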
| pytorch-image-models/timm/optim/rmsprop_tf.py/0 | {
"file_path": "pytorch-image-models/timm/optim/rmsprop_tf.py",
"repo_id": "pytorch-image-models",
"token_count": 2901
} | 187 |
""" CUDA / AMP utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
try:
from apex import amp
has_apex = True
except ImportError:
amp = None
has_apex = False
from .clip_grad import dispatch_clip_grad
class ApexScaler:
state_dict_key = "amp"
def __call__(
self,
loss,
optimizer,
clip_grad=None,
clip_mode='norm',
parameters=None,
create_graph=False,
need_update=True,
):
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(create_graph=create_graph)
if need_update:
if clip_grad is not None:
dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode)
optimizer.step()
def state_dict(self):
if 'state_dict' in amp.__dict__:
return amp.state_dict()
def load_state_dict(self, state_dict):
if 'load_state_dict' in amp.__dict__:
amp.load_state_dict(state_dict)
class NativeScaler:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(
self,
loss,
optimizer,
clip_grad=None,
clip_mode='norm',
parameters=None,
create_graph=False,
need_update=True,
):
self._scaler.scale(loss).backward(create_graph=create_graph)
if need_update:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)
self._scaler.step(optimizer)
self._scaler.update()
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
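# --- Usage sketch (not part of the original module) ---
# A hedged example of one mixed-precision training step driven by NativeScaler:
# the forward pass runs under torch.cuda.amp.autocast and the scaler applies loss
# scaling, optional gradient clipping and the optimizer step. It only runs when a
# CUDA device is available; the tiny model and data are placeholders.
if __name__ == '__main__':
    import torch.nn as nn

    if torch.cuda.is_available():
        model = nn.Linear(10, 2).cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        loss_scaler = NativeScaler()
        x = torch.randn(8, 10, device='cuda')
        y = torch.randint(0, 2, (8,), device='cuda')
        with torch.cuda.amp.autocast():
            loss = nn.functional.cross_entropy(model(x), y)
        optimizer.zero_grad()
        loss_scaler(loss, optimizer, clip_grad=1.0, clip_mode='norm', parameters=model.parameters())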
| pytorch-image-models/timm/utils/cuda.py/0 | {
"file_path": "pytorch-image-models/timm/utils/cuda.py",
"repo_id": "pytorch-image-models",
"token_count": 980
} | 188 |
<div align="center">
<a href="https://www.youtube.com/watch?v=jlMAX2Oaht0">
<img width=560 height=315 alt="Making TGI deployment optimal" src="https://huggingface.co/datasets/Narsil/tgi_assets/resolve/main/thumbnail.png">
</a>
# Text Generation Inference
<a href="https://github.com/huggingface/text-generation-inference">
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social">
</a>
<a href="https://huggingface.github.io/text-generation-inference">
<img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational">
</a>
A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co)
to power Hugging Chat, the Inference API and Inference Endpoint.
</div>
## Table of contents
- [Get Started](#get-started)
- [API Documentation](#api-documentation)
- [Using a private or gated model](#using-a-private-or-gated-model)
- [A note on Shared Memory](#a-note-on-shared-memory-shm)
- [Distributed Tracing](#distributed-tracing)
- [Local Install](#local-install)
- [CUDA Kernels](#cuda-kernels)
- [Optimized architectures](#optimized-architectures)
- [Run locally](#run-locally)
- [Run](#run)
- [Quantization](#quantization)
- [Develop](#develop)
- [Testing](#testing)
Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and [more](https://huggingface.co/docs/text-generation-inference/supported_models). TGI implements many features, such as:
- Simple launcher to serve most popular LLMs
- Production ready (distributed tracing with Open Telemetry, Prometheus metrics)
- Tensor Parallelism for faster inference on multiple GPUs
- Token streaming using Server-Sent Events (SSE)
- Continuous batching of incoming requests for increased total throughput
- Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures
- Quantization with:
- [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
- [GPT-Q](https://arxiv.org/abs/2210.17323)
- [EETQ](https://github.com/NetEase-FuXi/EETQ)
- [AWQ](https://github.com/casper-hansen/AutoAWQ)
- [Safetensors](https://github.com/huggingface/safetensors) weight loading
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
- Logits warper (temperature scaling, top-p, top-k, repetition penalty, more details see [transformers.LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor))
- Stop sequences
- Log probabilities
- [Speculation](https://huggingface.co/docs/text-generation-inference/conceptual/speculation) ~2x latency
- [Guidance/JSON](https://huggingface.co/docs/text-generation-inference/conceptual/guidance). Specify an output format to speed up inference and make sure the output is valid according to some specification.
- Custom Prompt Generation: Easily generate text by providing custom prompts to guide the model's output
- Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance
### Hardware support
- [Nvidia](https://github.com/huggingface/text-generation-inference/pkgs/container/text-generation-inference)
- [AMD](https://github.com/huggingface/text-generation-inference/pkgs/container/text-generation-inference) (-rocm)
- [Inferentia](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference)
- [Intel GPU](https://github.com/huggingface/text-generation-inference/pull/1475)
- [Gaudi](https://github.com/huggingface/tgi-gaudi)
## Get Started
### Docker
For a detailed starting guide, please see the [Quick Tour](https://huggingface.co/docs/text-generation-inference/quicktour). The easiest way of getting started is using the official Docker container:
```shell
model=HuggingFaceH4/zephyr-7b-beta
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id $model
```
And then you can make requests like
```bash
curl 127.0.0.1:8080/generate \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
-H 'Content-Type: application/json'
```
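The same endpoint can also be called from Python. Below is a minimal sketch using the `requests` library (any HTTP client works; the `text-generation` Python client shipped in this repository provides a higher-level API):

```python
import requests

response = requests.post(
    "http://127.0.0.1:8080/generate",
    json={"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}},
)
response.raise_for_status()
print(response.json()["generated_text"])
```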
**Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`. Please note CPU is not the intended platform for this project, so performance might be subpar.
**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4-rocm --model-id $model` instead of the command above.
To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
```
text-generation-launcher --help
```
### API documentation
You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route.
The Swagger UI is also available at: [https://huggingface.github.io/text-generation-inference](https://huggingface.github.io/text-generation-inference).
### Using a private or gated model
You have the option to utilize the `HUGGING_FACE_HUB_TOKEN` environment variable for configuring the token employed by
`text-generation-inference`. This allows you to gain access to protected resources.
For example, if you want to serve the gated Llama V2 model variants:
1. Go to https://huggingface.co/settings/tokens
2. Copy your cli READ token
3. Export `HUGGING_FACE_HUB_TOKEN=<your cli READ token>`
or with Docker:
```shell
model=meta-llama/Llama-2-7b-chat-hf
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
token=<your cli READ token>
docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 --model-id $model
```
### A note on Shared Memory (shm)
[`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by
`PyTorch` to do distributed training/inference. `text-generation-inference` makes
use of `NCCL` to enable Tensor Parallelism to dramatically speed up inference for large language models.
In order to share data between the different devices of a `NCCL` group, `NCCL` might fall back to using the host memory if
peer-to-peer using NVLink or PCI is not possible.
To allow the container to use 1G of Shared Memory and support SHM sharing, we add `--shm-size 1g` on the above command.
If you are running `text-generation-inference` inside `Kubernetes`, you can also add Shared Memory to the container by
creating a volume with:
```yaml
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
```
and mounting it to `/dev/shm`.
Finally, you can also disable SHM sharing by using the `NCCL_SHM_DISABLE=1` environment variable. However, note that
this will impact performance.
### Distributed Tracing
`text-generation-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature
by setting the address to an OTLP collector with the `--otlp-endpoint` argument.
### Architecture

### Local install
You can also opt to install `text-generation-inference` locally.
First [install Rust](https://rustup.rs/) and create a Python virtual environment with at least
Python 3.9, e.g. using `conda`:
```shell
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
conda create -n text-generation-inference python=3.11
conda activate text-generation-inference
```
You may also need to install Protoc.
On Linux:
```shell
PROTOC_ZIP=protoc-21.12-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
```
On MacOS, using Homebrew:
```shell
brew install protobuf
```
Then run:
```shell
BUILD_EXTENSIONS=True make install # Install repository and HF/transformer fork with CUDA kernels
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2
```
**Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run:
```shell
sudo apt-get install libssl-dev gcc -y
```
## Optimized architectures
TGI works out of the box to serve optimized implementations of the most popular modern models. They can be found in [this list](https://huggingface.co/docs/text-generation-inference/supported_models).
Other architectures are supported on a best-effort basis using:
`AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")`
or
`AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")`
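As a rough sketch, this fallback corresponds to loading the model with plain `transformers` (the model id below is a placeholder; this path does not use TGI's optimized kernels):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<model>"  # placeholder: any causal LM hosted on the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("What is Deep Learning?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```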
## Run locally
### Run
```shell
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2
```
### Quantization
You can also quantize the weights with bitsandbytes to reduce the VRAM requirement:
```shell
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2 --quantize bitsandbytes
```
4bit quantization is available using the [NF4 and FP4 data types from bitsandbytes](https://arxiv.org/pdf/2305.14314.pdf). It can be enabled by providing `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` as a command line argument to `text-generation-launcher`.
## Develop
```shell
make server-dev
make router-dev
```
## Testing
```shell
# python
make python-server-tests
make python-client-tests
# or both server and client tests
make python-tests
# rust cargo tests
make rust-tests
# integration tests
make integration-tests
```
| text-generation-inference/README.md/0 | {
"file_path": "text-generation-inference/README.md",
"repo_id": "text-generation-inference",
"token_count": 3371
} | 189 |
[tool.poetry]
name = "text-generation"
version = "0.6.1"
description = "Hugging Face Text Generation Python Client"
license = "Apache-2.0"
authors = ["Olivier Dehaene <[email protected]>"]
maintainers = ["Olivier Dehaene <[email protected]>"]
readme = "README.md"
homepage = "https://github.com/huggingface/text-generation-inference"
repository = "https://github.com/huggingface/text-generation-inference"
[tool.poetry.dependencies]
python = "^3.7"
pydantic = "> 1.10, < 3"
aiohttp = "^3.8"
huggingface-hub = ">= 0.12, < 1.0"
[tool.poetry.dev-dependencies]
pytest = "^6.2.5"
pytest-asyncio = "^0.17.2"
pytest-cov = "^3.0.0"
[tool.pytest.ini_options]
asyncio_mode = "auto"
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
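# --- Usage sketch (not part of the original file) ---
# A hedged example of the client packaged here, assuming a TGI server is
# listening on localhost:8080 (see the repository README for how to start one):
#
#     from text_generation import Client
#
#     client = Client("http://127.0.0.1:8080")
#     print(client.generate("What is Deep Learning?", max_new_tokens=20).generated_text)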
| text-generation-inference/clients/python/pyproject.toml/0 | {
"file_path": "text-generation-inference/clients/python/pyproject.toml",
"repo_id": "text-generation-inference",
"token_count": 336
} | 190 |
# Text-generation-launcher arguments
<!-- WRAP CODE BLOCKS -->
```shell
Text Generation Launcher
Usage: text-generation-launcher [OPTIONS]
Options:
```
## MODEL_ID
```shell
--model-id <MODEL_ID>
The name of the model to load. Can be a MODEL_ID as listed on <https://hf.co/models> like `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. Or it can be a local directory containing the necessary files as saved by `save_pretrained(...)` methods of transformers
[env: MODEL_ID=]
[default: bigscience/bloom-560m]
```
## REVISION
```shell
--revision <REVISION>
The actual revision of the model if you're referring to a model on the hub. You can use a specific commit id or a branch like `refs/pr/2`
[env: REVISION=]
```
## VALIDATION_WORKERS
```shell
--validation-workers <VALIDATION_WORKERS>
The number of tokenizer workers used for payload validation and truncation inside the router
[env: VALIDATION_WORKERS=]
[default: 2]
```
## SHARDED
```shell
--sharded <SHARDED>
Whether to shard the model across multiple GPUs. By default text-generation-inference will use all available GPUs to run the model. Setting it to `false` deactivates `num_shard`
[env: SHARDED=]
[possible values: true, false]
```
## NUM_SHARD
```shell
--num-shard <NUM_SHARD>
The number of shards to use if you don't want to use all GPUs on a given machine. You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to launch 2 copies with 2 shards each on a given machine with 4 GPUs, for instance
[env: NUM_SHARD=]
```
## QUANTIZE
```shell
--quantize <QUANTIZE>
Whether you want the model to be quantized
[env: QUANTIZE=]
Possible values:
- awq: 4 bit quantization. Requires a specific AWQ quantized model: https://hf.co/models?search=awq. Should replace GPTQ models wherever possible because of the better latency
- eetq: 8 bit quantization, doesn't require specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from https://github.com/NetEase-FuXi/EETQ.git
- gptq: 4 bit quantization. Requires a specific GPTQ quantized model: https://hf.co/models?search=gptq. text-generation-inference will use exllama (faster) kernels wherever possible, and the triton kernel (wider support) when it's not. AWQ has faster kernels
- bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16
- bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
- bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases, but this one may have better perplexity performance for your model
```
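For example, serving an AWQ checkpoint could be requested as follows (the model id is only an illustration; any AWQ-quantized checkpoint from the search link above works the same way):
```shell
text-generation-launcher --model-id TheBloke/Llama-2-7B-AWQ --quantize awq
```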
## SPECULATE
```shell
--speculate <SPECULATE>
The number of input_ids to speculate on. If using a medusa model, the heads will be picked up automatically. Otherwise, it will use n-gram speculation, which is relatively free in terms of compute, but the speedup heavily depends on the task
[env: SPECULATE=]
```
## DTYPE
```shell
--dtype <DTYPE>
The dtype to be forced upon the model. This option cannot be used with `--quantize`
[env: DTYPE=]
[possible values: float16, bfloat16]
```
## TRUST_REMOTE_CODE
```shell
--trust-remote-code
Whether you want to execute hub modelling code. Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision
[env: TRUST_REMOTE_CODE=]
```
## MAX_CONCURRENT_REQUESTS
```shell
--max-concurrent-requests <MAX_CONCURRENT_REQUESTS>
The maximum amount of concurrent requests for this particular deployment. Having a low limit will refuse client requests instead of having them wait for too long, and is usually good to handle backpressure correctly
[env: MAX_CONCURRENT_REQUESTS=]
[default: 128]
```
## MAX_BEST_OF
```shell
--max-best-of <MAX_BEST_OF>
This is the maximum allowed value for clients to set `best_of`. Best of makes `n` generations at the same time, and returns the best in terms of overall log probability over the entire generated sequence
[env: MAX_BEST_OF=]
[default: 2]
```
## MAX_STOP_SEQUENCES
```shell
--max-stop-sequences <MAX_STOP_SEQUENCES>
This is the maximum allowed value for clients to set `stop_sequences`. Stop sequences are used to allow the model to stop on more than just the EOS token, and enable more complex "prompting" where users can preprompt the model in a specific way and define their "own" stop token aligned with their prompt
[env: MAX_STOP_SEQUENCES=]
[default: 4]
```
## MAX_TOP_N_TOKENS
```shell
--max-top-n-tokens <MAX_TOP_N_TOKENS>
This is the maximum allowed value for clients to set `top_n_tokens`. `top_n_tokens` is used to return information about the `n` most likely tokens at each generation step, instead of just the sampled token. This information can be used for downstream tasks like classification or ranking
[env: MAX_TOP_N_TOKENS=]
[default: 5]
```
## MAX_INPUT_LENGTH
```shell
--max-input-length <MAX_INPUT_LENGTH>
This is the maximum allowed input length (expressed in number of tokens) for users. The larger this value, the longer prompts users can send, which can impact the overall memory required to handle the load. Please note that some models have a finite range of sequence lengths they can handle
[env: MAX_INPUT_LENGTH=]
[default: 1024]
```
## MAX_TOTAL_TOKENS
```shell
--max-total-tokens <MAX_TOTAL_TOKENS>
This is the most important value to set as it defines the "memory budget" of running clients' requests. Clients will send input sequences and ask to generate `max_new_tokens` on top. With a value of `1512`, users can send either a prompt of `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for `1511` max_new_tokens. The larger this value, the larger each request will be in your RAM and the less effective batching can be
[env: MAX_TOTAL_TOKENS=]
[default: 2048]
```
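As an illustration of how this interacts with `--max-input-length` (the numbers below are arbitrary), a deployment that accepts prompts of up to 4095 tokens while capping each sequence at 4096 total tokens could be launched with:
```shell
text-generation-launcher --model-id <MODEL_ID> --max-input-length 4095 --max-total-tokens 4096
```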
## WAITING_SERVED_RATIO
```shell
--waiting-served-ratio <WAITING_SERVED_RATIO>
This represents the ratio of waiting queries vs running queries where you want to start considering pausing the running queries to include the waiting ones into the same batch. `waiting_served_ratio=1.2` means that when 12 queries are waiting and there are only 10 queries left in the current batch, we check whether we can fit those 12 waiting queries into the batching strategy, and if yes, batching happens, delaying the 10 running queries by a `prefill` run.
This setting is only applied if there is room in the batch as defined by `max_batch_total_tokens`.
[env: WAITING_SERVED_RATIO=]
[default: 1.2]
```
## MAX_BATCH_PREFILL_TOKENS
```shell
--max-batch-prefill-tokens <MAX_BATCH_PREFILL_TOKENS>
Limits the number of tokens for the prefill operation. Since this operation takes the most memory and is compute bound, it is interesting to limit the number of requests that can be sent
[env: MAX_BATCH_PREFILL_TOKENS=]
[default: 4096]
```
## MAX_BATCH_TOTAL_TOKENS
```shell
--max-batch-total-tokens <MAX_BATCH_TOTAL_TOKENS>
**IMPORTANT** This is one critical control to allow maximum usage of the available hardware.
This represents the total amount of potential tokens within a batch. When using padding (not recommended) this would be equivalent to `batch_size` * `max_total_tokens`.
However in the non-padded (flash attention) version this can be much finer.
For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` or a single query of `1000` tokens.
Overall this number should be the largest possible amount that fits the remaining memory (after the model is loaded). Since the actual memory overhead depends on other parameters like if you're using quantization, flash attention or the model implementation, text-generation-inference cannot infer this number automatically.
[env: MAX_BATCH_TOTAL_TOKENS=]
```
## MAX_WAITING_TOKENS
```shell
--max-waiting-tokens <MAX_WAITING_TOKENS>
This setting defines how many tokens can be passed before forcing the waiting queries to be put on the batch (if the size of the batch allows for it). New queries require 1 `prefill` forward, which is different from `decode` and therefore you need to pause the running batch in order to run `prefill` to create the correct values for the waiting queries to be able to join the batch.
With a value too small, queries will always "steal" the compute to run `prefill` and running queries will be delayed by a lot.
With a value too big, waiting queries could wait for a very long time before being allowed a slot in the running batch. If your server is busy that means that requests that could run in ~2s on an empty server could end up running in ~20s because the query had to wait for 18s.
This number is expressed in number of tokens to make it a bit more "model" agnostic, but what should really matter is the overall latency for end users.
[env: MAX_WAITING_TOKENS=]
[default: 20]
```
## MAX_BATCH_SIZE
```shell
--max-batch-size <MAX_BATCH_SIZE>
Enforce a maximum number of requests per batch. This is a specific flag for hardware targets that do not support unpadded inference
[env: MAX_BATCH_SIZE=]
```
## ENABLE_CUDA_GRAPHS
```shell
--enable-cuda-graphs
Enable experimental support for cuda graphs
[env: ENABLE_CUDA_GRAPHS=]
```
## HOSTNAME
```shell
--hostname <HOSTNAME>
The IP address to listen on
[env: HOSTNAME=]
[default: 0.0.0.0]
```
## PORT
```shell
-p, --port <PORT>
The port to listen on
[env: PORT=]
[default: 3000]
```
## SHARD_UDS_PATH
```shell
--shard-uds-path <SHARD_UDS_PATH>
The name of the socket for gRPC communication between the webserver and the shards
[env: SHARD_UDS_PATH=]
[default: /tmp/text-generation-server]
```
## MASTER_ADDR
```shell
--master-addr <MASTER_ADDR>
The address the master shard will listen on. (setting used by torch distributed)
[env: MASTER_ADDR=]
[default: localhost]
```
## MASTER_PORT
```shell
--master-port <MASTER_PORT>
The port the master shard will listen on. (setting used by torch distributed)
[env: MASTER_PORT=]
[default: 29500]
```
## HUGGINGFACE_HUB_CACHE
```shell
--huggingface-hub-cache <HUGGINGFACE_HUB_CACHE>
The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance
[env: HUGGINGFACE_HUB_CACHE=]
```
## WEIGHTS_CACHE_OVERRIDE
```shell
--weights-cache-override <WEIGHTS_CACHE_OVERRIDE>
The location of the weights cache. Used to override the location if you want to provide a mounted disk for instance
[env: WEIGHTS_CACHE_OVERRIDE=]
```
## DISABLE_CUSTOM_KERNELS
```shell
--disable-custom-kernels
For some models (like bloom), text-generation-inference implemented custom cuda kernels to speed up inference. Those kernels were only tested on A100. Use this flag to disable them if you're running on different hardware and encounter issues
[env: DISABLE_CUSTOM_KERNELS=]
```
## CUDA_MEMORY_FRACTION
```shell
--cuda-memory-fraction <CUDA_MEMORY_FRACTION>
Limit the CUDA available memory. The allowed value equals the total visible memory multiplied by cuda-memory-fraction
[env: CUDA_MEMORY_FRACTION=]
[default: 1.0]
```
## ROPE_SCALING
```shell
--rope-scaling <ROPE_SCALING>
Rope scaling will only be used for RoPE models and allows rescaling the position rotary to accommodate larger prompts.
Goes together with `rope_factor`.
`--rope-factor 2.0` gives linear scaling with a factor of 2.0. `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0. `--rope-scaling linear` gives linear scaling with a factor of 1.0 (nothing will be changed, basically)
`--rope-scaling linear --rope-factor` fully describes the scaling you want
[env: ROPE_SCALING=]
[possible values: linear, dynamic]
```
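Putting the two flags together (the factor value here is arbitrary), dynamic scaling with a factor of 2.0 would be requested as:
```shell
text-generation-launcher --model-id <MODEL_ID> --rope-scaling dynamic --rope-factor 2.0
```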
## ROPE_FACTOR
```shell
--rope-factor <ROPE_FACTOR>
Rope scaling will only be used for RoPE models. See `rope_scaling`
[env: ROPE_FACTOR=]
```
## JSON_OUTPUT
```shell
--json-output
Outputs the logs in JSON format (useful for telemetry)
[env: JSON_OUTPUT=]
```
## OTLP_ENDPOINT
```shell
--otlp-endpoint <OTLP_ENDPOINT>
[env: OTLP_ENDPOINT=]
```
## CORS_ALLOW_ORIGIN
```shell
--cors-allow-origin <CORS_ALLOW_ORIGIN>
[env: CORS_ALLOW_ORIGIN=]
```
## WATERMARK_GAMMA
```shell
--watermark-gamma <WATERMARK_GAMMA>
[env: WATERMARK_GAMMA=]
```
## WATERMARK_DELTA
```shell
--watermark-delta <WATERMARK_DELTA>
[env: WATERMARK_DELTA=]
```
## NGROK
```shell
--ngrok
Enable ngrok tunneling
[env: NGROK=]
```
## NGROK_AUTHTOKEN
```shell
--ngrok-authtoken <NGROK_AUTHTOKEN>
ngrok authentication token
[env: NGROK_AUTHTOKEN=]
```
## NGROK_EDGE
```shell
--ngrok-edge <NGROK_EDGE>
ngrok edge
[env: NGROK_EDGE=]
```
## TOKENIZER_CONFIG_PATH
```shell
--tokenizer-config-path <TOKENIZER_CONFIG_PATH>
The path to the tokenizer config file. This path is used to load the tokenizer configuration which may include a `chat_template`. If not provided, the default config will be used from the model hub
[env: TOKENIZER_CONFIG_PATH=]
```
## DISABLE_GRAMMAR_SUPPORT
```shell
--disable-grammar-support
Disable outlines grammar constrained generation. This is a feature that allows you to generate text that follows a specific grammar
[env: DISABLE_GRAMMAR_SUPPORT=]
```
## ENV
```shell
-e, --env
Display a lot of information about your runtime environment
```
## HELP
```shell
-h, --help
Print help (see a summary with '-h')
```
## VERSION
```shell
-V, --version
Print version
```
| text-generation-inference/docs/source/basic_tutorials/launcher.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/launcher.md",
"repo_id": "text-generation-inference",
"token_count": 6114
} | 191 |
# Supported Models and Hardware
Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models and hardware are supported.
## Supported Models
The following models are optimized and can be served with TGI, which uses custom CUDA kernels for better inference. You can add the flag `--disable-custom-kernels` at the end of the `docker run` command if you wish to disable them.
- [BLOOM](https://huggingface.co/bigscience/bloom)
- [FLAN-T5](https://huggingface.co/google/flan-t5-xxl)
- [Galactica](https://huggingface.co/facebook/galactica-120b)
- [GPT-Neox](https://huggingface.co/EleutherAI/gpt-neox-20b)
- [Llama](https://github.com/facebookresearch/llama)
- [OPT](https://huggingface.co/facebook/opt-66b)
- [SantaCoder](https://huggingface.co/bigcode/santacoder)
- [Starcoder](https://huggingface.co/bigcode/starcoder)
- [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b)
- [Falcon 40B](https://huggingface.co/tiiuae/falcon-40b)
- [MPT](https://huggingface.co/mosaicml/mpt-30b)
- [Llama V2](https://huggingface.co/meta-llama)
- [Code Llama](https://huggingface.co/codellama)
- [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)
- [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
- [Phi](https://huggingface.co/microsoft/phi-2)
If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models:
```python
# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")
# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```
If you wish to serve a supported model that already exists in a local folder, just point to the local folder.
```bash
text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>
```
## Supported Hardware
TGI optimized models are supported on NVIDIA [A100](https://www.nvidia.com/en-us/data-center/a100/), [A10G](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) and [T4](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPUs with CUDA 12.2+. Note that you have to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) to use it. For other NVIDIA GPUs, continuous batching will still apply, but some operations like flash attention and paged attention will not be executed.
TGI also has support for ROCm-enabled AMD Instinct MI210 and MI250 GPUs, with paged attention, GPTQ quantization and flash attention v2 support. The following features are currently not supported in the ROCm version of TGI, and the support may be extended in the future:
* Loading [AWQ](https://huggingface.co/docs/transformers/quantization#awq) checkpoints.
* Flash [layer norm kernel](https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm)
* Kernel for sliding window attention (Mistral)
TGI is also supported on the following AI hardware accelerators:
* *Habana first-gen Gaudi and Gaudi2:* check out this [repository](https://github.com/huggingface/tgi-gaudi) to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index)
* *AWS Inferentia2:* check out this [guide](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference) on how to serve models with TGI on Inferentia2.
| text-generation-inference/docs/source/supported_models.md/0 | {
"file_path": "text-generation-inference/docs/source/supported_models.md",
"repo_id": "text-generation-inference",
"token_count": 1169
} | 192 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2,
"logprob": null,
"text": "<bos>"
},
{
"id": 2015,
"logprob": -10.0,
"text": "Test"
},
{
"id": 3853,
"logprob": -10.875,
"text": " request"
}
],
"seed": 0,
"tokens": [
{
"id": 7539,
"logprob": -0.73046875,
"special": false,
"text": " forms"
},
{
"id": 708,
"logprob": 0.0,
"special": false,
"text": " are"
},
{
"id": 671,
"logprob": -1.703125,
"special": false,
"text": " an"
},
{
"id": 8727,
"logprob": 0.0,
"special": false,
"text": " essential"
},
{
"id": 1702,
"logprob": 0.0,
"special": false,
"text": " part"
},
{
"id": 576,
"logprob": 0.0,
"special": false,
"text": " of"
},
{
"id": 573,
"logprob": 0.0,
"special": false,
"text": " the"
},
{
"id": 11859,
"logprob": -1.6953125,
"special": false,
"text": " lab"
},
{
"id": 2185,
"logprob": -1.3125,
"special": false,
"text": " process"
},
{
"id": 578,
"logprob": -1.5,
"special": false,
"text": " and"
}
],
"top_tokens": null
},
"generated_text": "Test request forms are an essential part of the lab process and"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1031
} | 193 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.54785156,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4091797,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0273438,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94433594,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.81347656,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2958984,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0644531,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9580078,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5073242,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1816406,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json",
"repo_id": "text-generation-inference",
"token_count": 1050
} | 194 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 60,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6328125,
"text": " print"
},
{
"id": 81,
"logprob": -1.6035156,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9882812,
"text": "hello"
}
],
"seed": 0,
"tokens": [
{
"id": 2262,
"logprob": -0.042999268,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -0.38549805,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.5229492,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.10632324,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -0.20141602,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7656,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 711,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.16027832,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 313,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 636,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 27,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7656,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 381,
"logprob": 0.0,
"special": false,
"text": "age"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 30,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 11442,
"logprob": 0.0,
"special": false,
"text": " age"
},
{
"id": 711,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 313,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 636,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 313,
"logprob": -0.6328125,
"special": false,
"text": " \""
},
{
"id": 313,
"logprob": -1.7011719,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 596,
"logprob": 0.0,
"special": false,
"text": " str"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 381,
"logprob": 0.0,
"special": false,
"text": "age"
},
{
"id": 490,
"logprob": 0.0,
"special": false,
"text": "))"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json",
"repo_id": "text-generation-inference",
"token_count": 4734
} | 195 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 5,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": 0,
"tokens": [
{
"id": 926,
"logprob": -4.3554688,
"special": false,
"text": " To"
},
{
"id": 18295,
"logprob": -7.7734375,
"special": false,
"text": " sell"
},
{
"id": 7868,
"logprob": -3.9257812,
"special": false,
"text": " things"
},
{
"id": 260,
"logprob": -2.4179688,
"special": false,
"text": "."
},
{
"id": 1,
"logprob": 0.0,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "To sell things."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json",
"repo_id": "text-generation-inference",
"token_count": 532
} | 196 |
import pytest
@pytest.fixture(scope="module")
def flash_llama_awq_handle(launcher):
with launcher(
"abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
num_shard=1,
quantize="awq",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_awq(flash_llama_awq_handle):
await flash_llama_awq_handle.health(300)
return flash_llama_awq_handle.client
@pytest.mark.asyncio
async def test_flash_llama_awq(flash_llama_awq, response_snapshot):
response = await flash_llama_awq.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
)
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_llama_awq_all_params(flash_llama_awq, response_snapshot):
response = await flash_llama_awq.generate(
"What is Deep Learning?",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_llama_awq_load(flash_llama_awq, generate_load, response_snapshot):
responses = await generate_load(
flash_llama_awq, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[
r.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
for r in responses
]
)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_awq.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_awq.py",
"repo_id": "text-generation-inference",
"token_count": 842
} | 197 |
import pytest
@pytest.fixture(scope="module")
def flash_starcoder_gptq_handle(launcher):
with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_starcoder_gptq(flash_starcoder_gptq_handle):
await flash_starcoder_gptq_handle.health(300)
return flash_starcoder_gptq_handle.client
@pytest.mark.asyncio
async def test_flash_starcoder_gptq(flash_starcoder_gptq, generous_response_snapshot):
response = await flash_starcoder_gptq.generate(
"def geometric_mean(L: List[float]):",
max_new_tokens=20,
decoder_input_details=True,
)
assert response.details.generated_tokens == 20
assert response == generous_response_snapshot
@pytest.mark.asyncio
async def test_flash_starcoder_gptq_default_params(
flash_starcoder_gptq, generous_response_snapshot
):
response = await flash_starcoder_gptq.generate(
"def geometric_mean(L: List[float]):",
max_new_tokens=20,
temperature=0.2,
top_p=0.95,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 20
assert response == generous_response_snapshot
@pytest.mark.asyncio
async def test_flash_starcoder_gptq_load(
flash_starcoder_gptq, generate_load, generous_response_snapshot
):
responses = await generate_load(
flash_starcoder_gptq,
"def geometric_mean(L: List[float]):",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == generous_response_snapshot
| text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py",
"repo_id": "text-generation-inference",
"token_count": 710
} | 198 |
use std::fmt;
use std::process::Command;
pub(crate) struct Env {
cargo_target: &'static str,
cargo_version: &'static str,
git_sha: &'static str,
docker_label: &'static str,
nvidia_env: String,
}
impl Env {
pub fn new() -> Self {
let nvidia_env = nvidia_smi();
Self {
nvidia_env: nvidia_env.unwrap_or("N/A".to_string()),
cargo_target: env!("VERGEN_CARGO_TARGET_TRIPLE"),
cargo_version: env!("VERGEN_RUSTC_SEMVER"),
git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"),
docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"),
}
}
}
impl fmt::Display for Env {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Runtime environment:")?;
writeln!(f, "Target: {}", self.cargo_target)?;
writeln!(f, "Cargo version: {}", self.cargo_version)?;
writeln!(f, "Commit sha: {}", self.git_sha)?;
writeln!(f, "Docker label: {}", self.docker_label)?;
write!(f, "nvidia-smi:\n{}", self.nvidia_env)?;
Ok(())
}
}
fn nvidia_smi() -> Option<String> {
let output = Command::new("nvidia-smi").output().ok()?;
let nvidia_smi = String::from_utf8(output.stdout).ok()?;
let output = nvidia_smi.replace('\n', "\n ");
Some(output.trim().to_string())
}
| text-generation-inference/launcher/src/env_runtime.rs/0 | {
"file_path": "text-generation-inference/launcher/src/env_runtime.rs",
"repo_id": "text-generation-inference",
"token_count": 650
} | 199 |
[package]
name = "grpc-metadata"
version = "0.1.0"
edition = "2021"
[dependencies]
opentelemetry = "^0.20"
tonic = "^0.10"
tracing = "^0.1"
tracing-opentelemetry = "^0.21"
| text-generation-inference/router/grpc-metadata/Cargo.toml/0 | {
"file_path": "text-generation-inference/router/grpc-metadata/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 83
} | 200 |
flash_att_v2_commit_cuda := 02ac572f3ffc4f402e4183aaa6824b45859d3ed3
flash_att_v2_commit_rocm := 8736558c287ff2ef28b24878e42828c595ac3e69
flash-attention-v2-cuda:
# Clone flash attention
pip install -U packaging ninja --no-cache-dir
git clone https://github.com/HazyResearch/flash-attention.git flash-attention-v2
build-flash-attention-v2-cuda: flash-attention-v2-cuda
cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_cuda)
cd flash-attention-v2 && git submodule update --init --recursive
cd flash-attention-v2 && python setup.py build
install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
cd flash-attention-v2 && git submodule update --init --recursive && python setup.py install
flash-attention-v2-rocm:
# Clone flash attention
pip install -U packaging ninja --no-cache-dir
git clone https://github.com/fxmarty/flash-attention-rocm flash-attention-v2
build-flash-attention-v2-rocm: flash-attention-v2-rocm
cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_rocm)
cd flash-attention-v2 && git submodule update --init --recursive
cd flash-attention-v2 && PYTORCH_ROCM_ARCH=gfx90a python setup.py build
install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
cd flash-attention-v2 && git submodule update --init --recursive && python setup.py install
| text-generation-inference/server/Makefile-flash-att-v2/0 | {
"file_path": "text-generation-inference/server/Makefile-flash-att-v2",
"repo_id": "text-generation-inference",
"token_count": 496
} | 201 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include "util.cuh"
#include "tuning.h"
#include "cuda_buffers.cuh"
#include "cuda_func/q4_matrix.cuh"
#include "cuda_func/q4_matmul.cuh"
#include "cuda_func/column_remap.cuh"
// Check CUDA return code. We don't want to include Torch headers in the .cu files because parsing them adds almost a
// minute to the compile time on a 12900K. Also passing exceptions back to Python is super tricky, so in place of
// exceptions, CUDA functions return with a cudaError_t which we can parse and dump to the console.
void check_cuda(cudaError_t ret)
{
switch (ret)
{
case cudaSuccess:
break;
case cudaUnspecified:
printf(" **** Unspecified error\n");
TORCH_CHECK(false, "CUDA error");
break;
default:
printf(" **** CUDA error\n"); \
printf(" **** %s\n", cudaGetErrorString(ret)); \
TORCH_CHECK(false, "CUDA error"); \
break;
}
}
// Some decluttering macros
#define STRINGIFY_(__x) #__x
#define STRINGIFY(__x) STRINGIFY_(__x)
#define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
#define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
#define TORCH_CHECK_SHAPE_MOD(__x, __dim_x, __mod) TORCH_CHECK((__x).size(__dim_x) % __mod == 0, #__x ".shape[" STRINGIFY(__dim_x) "] must be a multiple of " STRINGIFY(__mod))
#define TORCH_CHECK_DEVICE_INDEX(__index) \
do { \
TORCH_CHECK(__index >= 0, "no device index"); \
TORCH_CHECK(__index < CUDA_MAX_DEVICES, "invalid device index"); \
} while(0)
#define TORCH_CHECK_QUANT(__w, __w_scales, __w_zeros, __seq_g_idx, __x_map) \
do { \
TORCH_CHECK_DTYPE(__w, kInt); \
TORCH_CHECK_DTYPE(__w_scales, kHalf); \
TORCH_CHECK_DTYPE(__w_zeros, kInt); \
TORCH_CHECK_DTYPE_OPT(__seq_g_idx, kShort); \
TORCH_CHECK_DTYPE_OPT(__x_map, kInt); \
TORCH_CHECK_SHAPES_OPT(__seq_g_idx, 0, __w, 0, 2 * 8); \
TORCH_CHECK_SHAPES_OPT(__x_map, 0, __w, 0, 8); \
} while(0)
int get_groupsize(torch::Tensor w, torch::Tensor w_zeros)
{
int groupsize = w.size(0) * 8 / w_zeros.size(0);
TORCH_CHECK(groupsize * w_zeros.size(0) == w.size(0) * 8, "w.shape[-2] must be a multiple of zeros.shape[-2]")
return groupsize;
}
// Tuning parameters
ExLlamaTuning tuningParams;
void set_tuning_params
(
int matmul_recons_thd,
bool matmul_fused_remap,
bool matmul_no_half2
)
{
tuningParams.matmul_recons_thd = matmul_recons_thd;
tuningParams.matmul_fused_remap = matmul_fused_remap;
tuningParams.matmul_no_half2 = matmul_no_half2;
}
// Release all unmanaged objects allocated by the extension
void cleanup()
{
cleanup_buffers_cuda();
g_q4_free_matrices();
}
// Prepare buffers for forward pass
void prepare_buffers
(
torch::Device device,
torch::Tensor temp_state,
torch::Tensor temp_dq
)
{
int device_index = device.index();
TORCH_CHECK_DEVICE_INDEX(device_index);
const at::cuda::OptionalCUDAGuard device_guard(device);
prepare_buffers_cuda
(
device_index,
(half*) temp_state.data_ptr(),
(half*) temp_dq.data_ptr()
);
}
// Create Q4Matrix, return handle
uintptr_t make_q4
(
torch::Tensor qweight,
torch::Tensor qzeros,
torch::Tensor scales,
torch::Tensor g_idx,
int device
)
{
TORCH_CHECK_DTYPE(qweight, kInt);
TORCH_CHECK_DTYPE(qzeros, kInt);
TORCH_CHECK_DTYPE(scales, kHalf);
TORCH_CHECK_DTYPE_OPT(g_idx, kInt);
TORCH_CHECK_SHAPES(qweight, 1, qzeros, 1, 8);
TORCH_CHECK_SHAPES(scales, 1, qweight, 1, 1);
TORCH_CHECK_SHAPES(qzeros, 0, scales, 0, 1);
int width = qweight.size(1);
int height = qweight.size(0) * 8;
int groups = qzeros.size(0);
Q4Matrix* m = new Q4Matrix
(
height,
width,
groups,
(uint32_t*) qweight.data_ptr(),
(uint32_t*) qzeros.data_ptr(),
(half*) scales.data_ptr(),
g_idx.device().is_meta() ? NULL : (uint32_t*) g_idx.data_ptr(),
device
);
g_q4_keep_matrix(m);
return reinterpret_cast<uintptr_t> (m);
}
// Matmul half @ quant -> half
void q4_matmul
(
torch::Tensor x,
uintptr_t w,
torch::Tensor out
)
{
Q4Matrix* wm = reinterpret_cast<Q4Matrix*> (w);
TORCH_CHECK_DTYPE(x, kHalf);
TORCH_CHECK_DTYPE(out, kHalf);
TORCH_CHECK_SHAPES(x, 0, out, 0, 1);
TORCH_CHECK(wm->height == x.size(-1), "x and w have incompatible shapes")
const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
int x_height = x.size(0);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (tuningParams.matmul_recons_thd == 0 || x_height < tuningParams.matmul_recons_thd)
{
q4_matmul_cuda
(
&tuningParams,
(half*) x.data_ptr(),
x_height,
wm,
(half*) out.data_ptr(),
false,
stream
);
}
else
{
q4_matmul_recons_cuda
(
&tuningParams,
(half*) x.data_ptr(),
x_height,
wm,
(half*) out.data_ptr(),
false,
at::cuda::getCurrentCUDABlasHandle()
);
}
}
// Remap columns in half tensor
void column_remap
(
torch::Tensor x,
torch::Tensor x_new,
torch::Tensor x_map
)
{
TORCH_CHECK_DTYPE(x, kHalf);
TORCH_CHECK_DTYPE(x_new, kHalf);
TORCH_CHECK_DTYPE(x_map, kInt);
TORCH_CHECK_SHAPES(x_map, 0, x, 1, 1);
int height = x.size(0);
int width = x.size(1);
const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
column_remap_cuda
(
(half*) x.data_ptr(),
(half*) x_new.data_ptr(),
height,
width,
(uint32_t*) x_map.data_ptr()
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
m.def("set_tuning_params", &set_tuning_params, "set_tuning_params");
m.def("prepare_buffers", &prepare_buffers, "prepare_buffers");
m.def("cleanup", &cleanup, "cleanup");
m.def("make_q4", &make_q4, "make_q4");
m.def("q4_matmul", &q4_matmul, "q4_matmul");
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/exllama_ext.cpp/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/exllama_ext.cpp",
"repo_id": "text-generation-inference",
"token_count": 3279
} | 202 |
#ifndef _qdq_2_cuh
#define _qdq_2_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_2BIT == 1
// Permutation:
//
// ffddbb99 77553311 eeccaa88 66442200
__forceinline__ __device__ void shuffle_2bit_16
(
uint32_t* q,
int stride
)
{
uint32_t qa = q[0];
uint32_t qb = 0;
#pragma unroll
for (int i = 0; i < 8; i++)
{
uint32_t qa0 = qa & 0x03;
uint32_t qa1 = (qa & 0x0c) >> 2;
qa >>= 4;
qb |= (qa1 << (i * 2 + 16));
qb |= (qa0 << (i * 2));
}
q[0] = qb;
}
__forceinline__ __device__ void dequant_2bit_16
(
const uint32_t q_0,
half2 (&dq)[8],
int stride
)
{
const uint32_t c0 = 0x64006400;
const half y4_ = __float2half_rn(1.0f / 4.0f);
const half y16_ = __float2half_rn(1.0f / 16.0f);
const half y64_ = __float2half_rn(1.0f / 64.0f);
const half2 y4 = __halves2half2(y4_, y4_);
const half2 y16 = __halves2half2(y16_, y16_);
const half2 y64 = __halves2half2(y64_, y64_);
const half z1_ = __float2half_rn(-1024.0f - 2.0f);
const half z4_ = __float2half_rn(-1024.0f / 4.0f - 2.0f);
const half z16_ = __float2half_rn(-1024.0f / 16.0f - 2.0f);
const half z64_ = __float2half_rn(-1024.0f / 64.0f - 2.0f);
const half2 z1 = __halves2half2(z1_, z1_);
const half2 z4 = __halves2half2(z4_, z4_);
const half2 z16 = __halves2half2(z16_, z16_);
const half2 z64 = __halves2half2(z64_, z64_);
uint32_t qa = q_0;
half2_uint32 q0((qa & 0x00030003) | c0); // half2(q[ 0], q[ 1]) + 1024
half2_uint32 q1((qa & 0x000c000c) | c0); // half2(q[ 2], q[ 3]) * 4 + 1024
half2_uint32 q2((qa & 0x00300030) | c0); // half2(q[ 4], q[ 5]) * 16 + 1024
half2_uint32 q3((qa & 0x00c000c0) | c0); // half2(q[ 6], q[ 7]) * 64 + 1024
qa >>= 8;
    half2_uint32 q4((qa & 0x00030003) | c0); // half2(q[ 8], q[ 9]) + 1024
half2_uint32 q5((qa & 0x000c000c) | c0); // half2(q[10], q[11]) * 4 + 1024
half2_uint32 q6((qa & 0x00300030) | c0); // half2(q[12], q[13]) * 16 + 1024
half2_uint32 q7((qa & 0x00c000c0) | c0); // half2(q[14], q[15]) * 64 + 1024
dq[0] = __hadd2(q0.as_half2, z1);
dq[1] = __hfma2(q1.as_half2, y4, z4);
dq[2] = __hfma2(q2.as_half2, y16, z16);
dq[3] = __hfma2(q3.as_half2, y64, z64);
dq[4] = __hadd2(q4.as_half2, z1);
dq[5] = __hfma2(q5.as_half2, y4, z4);
dq[6] = __hfma2(q6.as_half2, y16, z16);
dq[7] = __hfma2(q7.as_half2, y64, z64);
}
#else
__forceinline__ __device__ void shuffle_2bit_16
(
uint32_t* q,
int stride
)
{
}
__forceinline__ __device__ void dequant_2bit_16
(
const uint32_t q_0,
half2 (&dq)[8],
int stride
)
{
half dqh[16];
for (int i = 0; i < 16; i++) dqh[i] = dq_ns(exb(q_0, i * 2, 0x03), 2);
for (int i = 0; i < 8; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
#endif
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_2.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_2.cuh",
"repo_id": "text-generation-inference",
"token_count": 1589
} | 203 |
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.utils import weight_hub_files, download_weights
from text_generation_server.models.bloom import BloomCausalLMBatch, BLOOMSharded
@pytest.fixture(scope="session")
def default_bloom():
model_id = "bigscience/bloom-560m"
revision = "main"
filenames = weight_hub_files(model_id, revision, ".safetensors")
download_weights(filenames, model_id, revision)
return BLOOMSharded(model_id)
@pytest.fixture(scope="session")
def bloom_560m_tokenizer():
return AutoTokenizer.from_pretrained("bigscience/bloom-560m", padding_side="left")
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_bloom_batch(default_pb_batch, bloom_560m_tokenizer):
return BloomCausalLMBatch.from_pb(
default_pb_batch, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_bloom_batch(default_pb_request, bloom_560m_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
return BloomCausalLMBatch.from_pb(
batch_pb, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_bloom_batch):
batch = default_bloom_batch
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert len(batch.input_ids) == default_pb_batch.size
assert batch.input_ids[0][-1] == 10264
assert torch.all(batch.input_ids[0][:-1] == 3)
assert batch.attention_mask[0][0] == 1
assert torch.all(batch.attention_mask[0][1:] == 0)
assert batch.past_key_values is None
assert all(
[
torch.equal(input_ids, all_input_ids[:, 0])
for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
]
)
assert batch.input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
def test_batch_concatenate_no_prefill(default_bloom_batch):
with pytest.raises(ValueError):
BloomCausalLMBatch.concatenate([default_bloom_batch, default_bloom_batch])
def test_causal_lm_batch_type(default_bloom):
assert default_bloom.batch_type == BloomCausalLMBatch
def test_causal_lm_generate_token(default_bloom, default_bloom_batch):
sequence_length = len(default_bloom_batch.all_input_ids[0])
generations, next_batch, _ = default_bloom.generate_token(default_bloom_batch)
assert len(generations) == len(default_bloom_batch)
assert isinstance(next_batch, CausalLMBatch)
assert not next_batch.keys_head_dim_last
assert len(next_batch.all_input_ids) == len(next_batch)
assert len(next_batch.all_input_ids[0]) == sequence_length + 1
assert len(next_batch.attention_mask[0]) == 11
assert torch.all(next_batch.all_input_ids[0][-2:] == 10264)
assert torch.all(next_batch.all_input_ids[0][:-2] == 3)
assert torch.all(next_batch.attention_mask[0][:2] == 1)
assert torch.all(next_batch.attention_mask[0][2:] == 0)
assert next_batch.input_ids.shape == (len(next_batch), 1)
assert next_batch.input_ids[0, 0] == 10264
assert next_batch.input_lengths == [2]
assert next_batch.max_input_length == next_batch.input_lengths[0]
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (16, 64, sequence_length) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (16, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 10264
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == "Test"
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_causal_lm_generate_token_completion(default_bloom, default_bloom_batch):
next_batch = default_bloom_batch
for _ in range(default_bloom_batch.stopping_criterias[0].max_new_tokens - 1):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(default_bloom_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert generations[0].request_id == default_bloom_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_bloom_batch.stopping_criterias[0].max_new_tokens
)
def test_causal_lm_generate_token_completion_multi(
default_bloom, default_multi_requests_bloom_batch
):
next_batch = default_multi_requests_bloom_batch
for i in range(
default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(default_multi_requests_bloom_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == "TestTestTestTestTest"
assert (
generations[1].request_id == default_multi_requests_bloom_batch.requests[1].id
)
assert (
generations[1].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
)
# Copy stopping_criterias before filtering
stopping_criterias = default_multi_requests_bloom_batch.stopping_criterias.copy()
next_batch = next_batch.filter([next_batch.requests[0].id])
for _ in range(
stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert (
generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
)
def test_batch_concatenate(
default_bloom, default_bloom_batch, default_multi_requests_bloom_batch
):
next_batch_0 = default_bloom_batch
_, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
_, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_bloom_batch
_, next_batch_1, _ = default_bloom.generate_token(next_batch_1)
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
]
next_batch = BloomCausalLMBatch.concatenate([next_batch_0, next_batch_1])
assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])
assert torch.all(
next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
)
assert torch.all(
next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
)
assert torch.all(next_batch.attention_mask[1:, 3:] == 0)
assert next_batch.batch_id == 0
assert torch.all(next_batch.input_ids == 10264)
assert next_batch.input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == next_batch_1.requests
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all([p[0].shape == (3, 16, 64, 2) for p in next_batch.past_key_values])
assert all([p[1].shape == (3, 16, 2, 64) for p in next_batch.past_key_values])
for i, past in enumerate(next_batch.past_key_values):
assert torch.equal(next_batch_0_past_key_values[i][0][:, :, -2:], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:],
past[0][1:, :, :, -1].reshape(-1, 64, 1),
)
assert torch.equal(next_batch_0_past_key_values[i][1][:, -2:, :], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, -1:, :],
past[1][1:, :, -1, :].reshape(-1, 1, 64),
)
for _ in range(
default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == "TestTestTestTestTest"
assert (
generations[2].request_id == default_multi_requests_bloom_batch.requests[1].id
)
assert (
generations[2].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
)
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
for _ in range(
default_bloom_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
- 2
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert generations[0].request_id == default_bloom_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_bloom_batch.stopping_criterias[0].max_new_tokens
)
next_batch = next_batch.filter([next_batch.requests[1].id])
for _ in range(
default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
- default_bloom_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
- 4
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert (
generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
)
| text-generation-inference/server/tests/models/test_bloom.py/0 | {
"file_path": "text-generation-inference/server/tests/models/test_bloom.py",
"repo_id": "text-generation-inference",
"token_count": 5296
} | 204 |
import math
import torch
from typing import Optional, List, Tuple
BLOCK_SIZE: int = 16
# Will be set in warmup
CACHE_MANAGER: Optional["CacheManager"] = None
class CacheManager:
def __init__(
self,
num_blocks: int,
num_layers: int,
num_heads: int,
head_size: int,
repeat_slots: bool,
dtype: torch.dtype,
device: torch.device,
):
self.block_size = BLOCK_SIZE
self.num_blocks = num_blocks
self.repeat_slots = repeat_slots
element_size = torch.tensor([], dtype=dtype).element_size()
x = self.block_size // element_size
self.kv_cache = [
(
torch.empty(
(num_blocks, num_heads, head_size // x, self.block_size, x),
dtype=dtype,
device=device,
),
torch.empty(
(num_blocks, num_heads, head_size, self.block_size),
dtype=dtype,
device=device,
),
)
for _ in range(num_layers)
]
self.free_block_mask = torch.ones(num_blocks, dtype=torch.int32, device="cpu")
self.slots = torch.arange(
0, num_blocks * self.block_size, dtype=torch.int32
).view(num_blocks, self.block_size)
def allocate(
self,
needed_blocks_slots: List[Tuple[int, int]],
blocks: int,
max_blocks: int,
device: torch.device,
):
# Get free blocks indices by finding values in mask that are not set to 0
free_block_indices = self.free_block_mask.nonzero()
assert (
len(free_block_indices) >= blocks
), f"Out of available cache blocks: asked {blocks}, only {len(free_block_indices)} free blocks"
# Slice by the number of required blocks
block_indices = free_block_indices[:blocks]
block_indices = block_indices.flatten()
# Padded block tables
block_tables_tensor = torch.zeros(
(len(needed_blocks_slots), max_blocks), dtype=torch.int32
)
# Allocate paged attention blocks
cumulative_blocks = 0
slots = []
block_tables = []
for i, (needed_blocks, needed_slots) in enumerate(needed_blocks_slots):
# Get allocated blocks for this sequence
allocated_blocks = block_indices[
cumulative_blocks : cumulative_blocks + needed_blocks
]
# Get slots for the allocated blocks
all_slots = self.slots[allocated_blocks].flatten()
# Repeat slots in the case of context sliding window
if needed_slots > len(all_slots) and self.repeat_slots:
repeats = math.ceil(needed_slots / len(all_slots))
all_slots = all_slots.repeat(repeats)
allocated_slots = all_slots[:needed_slots]
slots.append(allocated_slots)
block_tables.append(allocated_blocks.tolist())
block_tables_tensor[i, :needed_blocks] = allocated_blocks
cumulative_blocks += needed_blocks
block_tables = block_tables
block_tables_tensor = block_tables_tensor.to(device)
slots = torch.concat(slots).to(device)
# Allocate the required number of blocks by setting the mask to 0
self.free_block_mask[block_indices] = 0
return block_tables, block_tables_tensor, slots
def free(self, block_indices: Optional[List[int]]):
if block_indices is not None and block_indices:
# Reset mask
self.free_block_mask[block_indices] = 1
def set_cache_manager(
num_blocks: int,
num_layers: int,
num_heads: int,
head_size: int,
repeat_slots: bool,
dtype: torch.dtype,
device: torch.device,
) -> CacheManager:
global CACHE_MANAGER
if CACHE_MANAGER is not None:
del CACHE_MANAGER
torch.cuda.empty_cache()
CACHE_MANAGER = CacheManager(
num_blocks, num_layers, num_heads, head_size, repeat_slots, dtype, device
)
return CACHE_MANAGER
def get_cache_manager() -> CacheManager:
global CACHE_MANAGER
if CACHE_MANAGER is None:
raise RuntimeError("cache manager was not initialized")
return CACHE_MANAGER
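# Illustrative usage sketch (not part of the original module): allocate paged-attention
# blocks for a single 35-token sequence with the default block size of 16, then release
# them. The sizes below are arbitrary and only meant to show the call sequence.
if __name__ == "__main__":
    manager = set_cache_manager(
        num_blocks=8,
        num_layers=2,
        num_heads=4,
        head_size=64,
        repeat_slots=False,
        dtype=torch.float32,
        device=torch.device("cpu"),
    )
    # 35 slots require ceil(35 / BLOCK_SIZE) = 3 blocks
    block_tables, block_tables_tensor, slots = manager.allocate(
        needed_blocks_slots=[(3, 35)],
        blocks=3,
        max_blocks=3,
        device=torch.device("cpu"),
    )
    assert slots.shape[0] == 35
    # Return the blocks to the free pool once the sequence is finished
    manager.free(block_tables[0])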
| text-generation-inference/server/text_generation_server/models/cache_manager.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/cache_manager.py",
"repo_id": "text-generation-inference",
"token_count": 2033
} | 205 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Idefics model."""
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedModel
from transformers.activations import ACT2FN
from dataclasses import dataclass
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from transformers.modeling_utils import PretrainedConfig
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig
from text_generation_server.models.custom_modeling.idefics_vision import (
IdeficsVisionTransformer,
)
from text_generation_server.models.custom_modeling.idefics_perceiver import (
IdeficsPerceiverResampler,
)
from text_generation_server.utils.layers import (
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
SpeculativeHead,
PositionRotaryEmbedding,
FastLinear,
)
from text_generation_server.utils.import_utils import IS_CUDA_SYSTEM, IS_ROCM_SYSTEM
if IS_CUDA_SYSTEM:
import dropout_layer_norm
elif IS_ROCM_SYSTEM:
from vllm import layernorm_ops
@dataclass
class BaseModelOutputWithPastImage(BaseModelOutputWithPast):
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
class CausalLMOutputWithPastImage(CausalLMOutputWithPast):
image_hidden_states: Optional[torch.FloatTensor] = None
logger = logging.get_logger(__name__)  # needed by the logger.warning_once calls below
# _CONFIG_FOR_DOC = "IdeficsConfig"
# IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = [
# "HuggingFaceM4/idefics-9b",
# "HuggingFaceM4/idefics-80b",
# # See all Idefics models at https://huggingface.co/models?filter=idefics
# ]
def expand_inputs_for_generation(
input_ids,
expand_size=1,
is_encoder_decoder=False,
attention_mask=None,
encoder_outputs=None,
**model_kwargs,
):
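    # Repeat every sequence `expand_size` times (e.g. for beam search / multiple return
    # sequences) and expand the matching masks and pixel values with the same indices.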
expanded_return_idx = (
torch.arange(input_ids.shape[0])
.view(-1, 1)
.repeat(1, expand_size)
.view(-1)
.to(input_ids.device)
)
input_ids = input_ids.index_select(0, expanded_return_idx)
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = token_type_ids.index_select(
0, expanded_return_idx
)
if attention_mask is not None:
model_kwargs["attention_mask"] = attention_mask.index_select(
0, expanded_return_idx
)
model_kwargs["image_attention_mask"] = model_kwargs[
"image_attention_mask"
].index_select(0, expanded_return_idx)
model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(
0, expanded_return_idx
)
if is_encoder_decoder:
if encoder_outputs is None:
raise ValueError(
"If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined."
)
encoder_outputs["last_hidden_state"] = (
encoder_outputs.last_hidden_state.index_select(
0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device)
)
)
model_kwargs["encoder_outputs"] = encoder_outputs
return input_ids, model_kwargs
def update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False):
# must have this key set to at least None
model_kwargs["past_key_values"] = model_kwargs.get("past_key_values", None)
# update past
if "past_key_values" in outputs:
model_kwargs["past"] = outputs.past_key_values
elif "mems" in outputs:
model_kwargs["past"] = outputs.mems
elif "past_buckets_states" in outputs:
model_kwargs["past"] = outputs.past_buckets_states
else:
model_kwargs["past"] = None
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat(
[token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1
)
# update attention masks
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))],
dim=-1,
)
if "image_attention_mask" in model_kwargs:
image_attention_mask = model_kwargs["image_attention_mask"]
last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
model_kwargs["image_attention_mask"] = last_mask
return model_kwargs
def prepare_inputs_for_generation(input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
pixel_values = kwargs.get("pixel_values", None)
image_attention_mask = kwargs.get("image_attention_mask", None)
# if pixel_values is None or image_attention_mask is None:
# raise ValueError("pixel values and image attention mask cannot be None")
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"pixel_values": pixel_values,
"image_attention_mask": image_attention_mask,
}
def freeze_model(model, module_exceptions=[]):
mapping = {
"LayerNorm": nn.LayerNorm,
"Linear": nn.Linear,
"Embedding": nn.Embedding,
}
module_exceptions_mapped = [mapping[m] for m in module_exceptions]
for module in model.modules():
if module_exceptions and any(
[isinstance(module, t) for t in module_exceptions_mapped]
):
module.requires_grad_(
True
            )  # Explicitly setting it to True to avoid any mistakes
else:
module.requires_grad_(False)
return model
class IdeficsDecoupledPartialTPEmbedding(nn.Module):
def __init__(
self,
config,
weights,
):
super().__init__()
self.num_embeddings = config.vocab_size
self.weight = TensorParallelEmbedding(
prefix="model.embed_tokens", weights=weights
)
self.additional_weight = nn.Parameter(
weights.get_tensor(f"model.embed_tokens.additional_embedding.weight")
)
def forward(self, input_ids):
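        # Ids below num_embeddings go through the tensor-parallel embedding; ids at or above it
        # (the additional vocabulary) are looked up in the separately stored additional_weight.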
# Clone so that we don't modify the original input_ids later on
input_ids = input_ids.clone()
additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
input_ids_additional_vocab = input_ids[additional_vocab_indices]
additional_embeddings = torch.nn.functional.embedding(
input_ids_additional_vocab - self.num_embeddings, self.additional_weight
)
# for successful lookup replace input_ids with 0, the results of these will be discarded anyway
input_ids[additional_vocab_indices] = 0
full_vector = self.weight(input_ids)
# overwrite the records with high indices
full_vector[additional_vocab_indices] = additional_embeddings
return full_vector
class IdeficsDecoupledTensorParallelLinear(nn.Module):
# Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
"""
    Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
then it will create `out_additional_features * in_features` additional parameters that are always trained. If
`out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
"""
def __init__(
self,
config,
weights,
) -> None:
super().__init__()
self.fc = SpeculativeHead.load(config=config, prefix="lm_head", weights=weights)
self.additional_fc = FastLinear.load(
config=config,
prefix="lm_head.additional_fc",
weights=weights,
bias=False,
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
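        # Concatenate the logits from the main lm_head (a SpeculativeHead) with those from
        # additional_fc, which covers the extra vocabulary, along the last dimension.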
output, speculative_logits = self.fc(input)
additional_features = self.additional_fc(input)
output = torch.cat((output, additional_features), -1)
return output, speculative_logits
def extra_repr(self) -> str:
"""Overwriting `nn.Linear.extra_repr` to include new parameters."""
return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
self.in_features,
self.out_features,
self.out_additional_features,
self.bias is not None,
self.partially_freeze,
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
):
"""
    Make causal mask used for causal (decoder) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[
torch.zeros(
tgt_len, past_key_values_length, dtype=dtype, device=device
),
mask,
],
dim=-1,
)
return mask[None, None, :, :].expand(
bsz, 1, tgt_len, tgt_len + past_key_values_length
)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(
inverted_mask.to(torch.bool), torch.finfo(dtype).min
)
class IdeficsRMSNorm(nn.Module):
def __init__(self, prefix, weights, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
weight = weights.get_tensor(f"{prefix}.weight")
self.weight = nn.Parameter(weight)
self.variance_epsilon = eps
def forward(self, hidden_states, residual=None):
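        # Three paths: a plain PyTorch RMSNorm when the hidden size exceeds what the fused
        # kernels support (> 8192), the fused dropout_layer_norm kernel on CUDA, and the vLLM
        # rms_norm kernel on ROCm.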
if hidden_states.shape[-1] > 8192:
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(
variance + self.variance_epsilon
)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
elif IS_CUDA_SYSTEM:
# faster post attention rms norm
unwrap = False
if len(hidden_states.shape) > 2:
unwrap = True
shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, shape[-1])
normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
None,
None,
None,
None,
None,
0.0,
self.variance_epsilon,
1.0,
0,
None,
False,
True, # Activate RMSNorm
)
if res is None:
res = hidden_states
if unwrap:
normed_hidden_states = normed_hidden_states.view(*shape)
return normed_hidden_states
elif IS_ROCM_SYSTEM:
# We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not.
if residual is not None:
hidden_states += residual
residual = hidden_states
unwrap = False
if len(hidden_states.shape) > 2:
unwrap = True
shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, shape[-1])
out = torch.empty_like(hidden_states)
layernorm_ops.rms_norm(
out,
hidden_states,
self.weight.data,
self.variance_epsilon,
)
if unwrap:
out = out.view(*shape)
return out
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
# this was adapted from LlamaMLP
class IdeficsMLP(nn.Module):
def __init__(
self,
config,
prefix,
weights,
):
super().__init__()
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
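        # gate_proj and up_proj were loaded as a single fused column-parallel matmul; split the
        # result and apply the gated activation act(gate) * up before the row-parallel down_proj.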
gate_up_states = self.gate_up_proj(hidden_states)
shape = gate_up_states.shape
gate_up_states = gate_up_states.view(*shape[:-1], 2, shape[-1] // 2)
return self.down_proj(
self.act_fn(gate_up_states[:, :, 0]) * gate_up_states[:, :, 1]
)
# this was adapted from LlamaAttention
class IdeficsAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config,
prefix,
weights,
qk_layer_norms: bool = False,
is_cross_attention: bool = False,
):
super().__init__()
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.dropout = config.dropout
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.is_cross_attention = is_cross_attention
# if not hasattr(nn.functional, "scaled_dot_product_attention"):
# raise ValueError("this model requires pytorch 2.0 or higher")
process_group = weights.process_group
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads //= weights.process_group.size()
if self.is_cross_attention:
# kv_input_dim = (
# self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
# )
self.q_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.q_proj", weights=weights, bias=False
)
self.k_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.k_proj", weights=weights, bias=False
)
self.v_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.v_proj", weights=weights, bias=False
)
else:
self.qkv = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
self.o_proj = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.o_proj", weights=weights, bias=False
)
self.rotary_emb = PositionRotaryEmbedding.static(
config=config, dim=self.head_dim, base=10000.0, device=weights.device
)
self.qk_layer_norms = qk_layer_norms
if self.qk_layer_norms:
self.q_layer_norm = IdeficsRMSNorm(
prefix=f"{prefix}.q_layer_norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.k_layer_norm = IdeficsRMSNorm(
prefix=f"{prefix}.q_layer_norm",
weights=weights,
eps=config.rms_norm_eps,
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
# if key_value_states are provided this layer is used as a cross-attention layer
is_cross_attention = self.is_cross_attention or key_value_states is not None
bsz, q_len, _ = hidden_states.size()
if is_cross_attention:
query_states = self.q_proj(hidden_states).view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
query_states = query_states.transpose(1, 2)
(
_,
kv_len,
_,
) = (
key_value_states.size()
) # Note that, in this case, `kv_len` == `kv_seq_len`
key_states = (
self.k_proj(key_value_states)
.view(bsz, kv_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
value_states = (
self.v_proj(key_value_states)
.view(bsz, kv_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
else:
qkv = self.qkv(hidden_states)
query_states, key_states, value_states = qkv.split(
self.num_heads * self.head_dim, dim=2
)
query_states = query_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
key_states = key_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # . transpose(1, 2)
value_states = value_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
kv_seq_len = q_len
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
max_s = max(kv_seq_len, q_len)
cos, sin = self.rotary_emb.get_cos_sin(
position_ids.view(-1), max_s, hidden_states.dtype
)
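            # Rotary position embeddings are applied in place on the flattened
            # (batch * seq_len, num_heads, head_dim) views of the query and key states.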
query_shape = query_states.shape
key_shape = key_states.shape
self.rotary_emb(
query_states.view(-1, *query_shape[2:]),
key_states.reshape(-1, *key_shape[2:]),
cos,
sin,
)
query_states = query_states.view(query_shape)
key_states = key_states.view(key_shape)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
# [bsz, nh, t, hd]
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
if self.qk_layer_norms:
query_states = self.q_layer_norm(query_states)
key_states = self.k_layer_norm(key_states)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_output = nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=attention_mask,
dropout_p=self.dropout,
)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
attn_weights = None
if output_attentions:
logger.warning_once(
"attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
)
return attn_output, attn_weights, past_key_value
# this was adapted from LlamaDecoderLayer
class IdeficsDecoderLayer(nn.Module):
def __init__(self, layer_id: int, config: IdeficsConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
prefix = f"model.layers.{layer_id}"
self.self_attn = IdeficsAttention(
config=config,
prefix=f"{prefix}.self_attn",
weights=weights,
qk_layer_norms=False,
is_cross_attention=False,
)
self.mlp = IdeficsMLP(
config=config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.input_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.dropout = config.dropout
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
# hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
# hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
class IdeficsGatedCrossAttentionLayer(nn.Module):
def __init__(self, layer_id, config: IdeficsConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
prefix = f"model.gated_cross_attn_layers.{layer_id}"
self.cross_attn = IdeficsAttention(
config=config,
prefix=f"{prefix}.cross_attn",
weights=weights,
qk_layer_norms=True,
is_cross_attention=True,
)
self.mlp = IdeficsMLP(
config=config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.input_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.config = config.dropout
self.act_cross_attn = nn.Tanh()
self.act_dense = nn.Tanh()
self.alpha_cross_attn = nn.Parameter(
weights.get_tensor(f"{prefix}.alpha_cross_attn")
)
self.alpha_dense = nn.Parameter(weights.get_tensor(f"{prefix}.alpha_dense"))
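        # Flamingo-style gating: the cross-attention and MLP branches are scaled by
        # tanh(alpha_cross_attn) and tanh(alpha_dense), so the block reduces to the underlying
        # language model whenever the alphas are zero.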
if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
raise ValueError("Alpha parameters not initialized correctly!")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
image_hidden_states: Optional[torch.Tensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
no_images: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
no_images (`bool`, *optional*, defaults to `False`): If `True` the vision part is ignored
"""
if image_hidden_states is None:
raise ValueError(
"`image_hidden_states` is required for Idefics cross attention module which are visual features to be"
" conditioned on."
)
if past_key_value is not None:
raise NotImplementedError(
"Past key value states are not implemented for Idefics cross attention module."
)
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.cross_attn(
hidden_states=hidden_states,
key_value_states=image_hidden_states,
attention_mask=image_attention_mask,
output_attentions=output_attentions,
)
# hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training)
# when there are no images the model is used in pure language mode
gate = 0 if no_images else 1
hidden_states = (
residual + gate * self.act_cross_attn(self.alpha_cross_attn) * hidden_states
)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
# hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training)
hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`IdeficsConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# @add_start_docstrings(
# "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
# LLAMA_START_DOCSTRING,
# )
class IdeficsPreTrainedModel(PreTrainedModel):
config_class = IdeficsConfig
# base_model_prefix = "model"
# supports_gradient_checkpointing = True
# _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
# def _init_weights(self, module):
# # important: this ported version of Idefics isn't meant for training from scratch - only
# # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
# # base should be used for training from scratch and it contains the correct code.
# std = self.config.initializer_range
# if isinstance(module, nn.Linear):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.Embedding):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.padding_idx is not None:
# module.weight.data[module.padding_idx].zero_()
# def _set_gradient_checkpointing(self, module, value=False):
# if isinstance(module, IdeficsModel):
# module.gradient_checkpointing = value
# LLAMA_INPUTS_DOCSTRING = r"""
# Args:
# input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
# Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
# it.
# Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
# [`PreTrainedTokenizer.__call__`] for details.
# [What are input IDs?](../glossary#input-ids)
# attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
# Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
# - 1 for tokens that are **not masked**,
# - 0 for tokens that are **masked**.
# [What are attention masks?](../glossary#attention-mask)
# Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
# [`PreTrainedTokenizer.__call__`] for details.
# If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
# `past_key_values`).
# If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
# and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
# information on the default strategy.
# - 1 indicates the head is **not masked**,
# - 0 indicates the head is **masked**.
# position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
# Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
# config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
# past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
# Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
# `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
# `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
# Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
# blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
# If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
# don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
# `decoder_input_ids` of shape `(batch_size, sequence_length)`.
# inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
# Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
# is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
# model's internal embedding lookup matrix.
# use_cache (`bool`, *optional*):
# If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
# `past_key_values`).
# output_attentions (`bool`, *optional*):
# Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
# tensors for more detail.
# output_hidden_states (`bool`, *optional*):
# Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
# more detail.
# return_dict (`bool`, *optional*):
# Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
# """
# @add_start_docstrings(
# "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
# LLAMA_START_DOCSTRING,
# )
class IdeficsModel(IdeficsPreTrainedModel):
# """
# Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`]
# Args:
# config: IdeficsConfig
# """
def __init__(self, config: IdeficsConfig, weights):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = IdeficsDecoupledPartialTPEmbedding(
config=config,
weights=weights,
)
self.image_size = config.vision_config.image_size
self.vision_config = config.vision_config
self.vision_model = IdeficsVisionTransformer(
prefix="model.vision_model",
config=config.vision_config,
weights=weights,
)
# Perceiver Resampler
if config.use_resampler:
perceiver_config = config.perceiver_config
self.perceiver_resampler = IdeficsPerceiverResampler(
prefix=f"model.perceiver_resampler",
config=config,
embed_dim=config.vision_config.embed_dim,
depth=perceiver_config.resampler_depth,
n_heads=perceiver_config.resampler_n_heads,
head_dim=perceiver_config.resampler_head_dim,
n_latents=perceiver_config.resampler_n_latents,
weights=weights,
)
self.layers = nn.ModuleList(
[
IdeficsDecoderLayer(layer_id, config, weights)
for layer_id in range(config.num_hidden_layers)
]
)
self.cross_layer_interval = config.cross_layer_interval
num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
self.gated_cross_attn_layers = nn.ModuleList(
[
IdeficsGatedCrossAttentionLayer(layer_id, config, weights)
for layer_id in range(num_cross_layers)
]
)
# self.gradient_checkpointing = False
self.norm = IdeficsRMSNorm(
prefix=f"model.norm", weights=weights, eps=config.rms_norm_eps
)
# self.gradient_checkpointing = False
# Initialize weights and apply final processing
# self.post_init()
# self.freeze_relevant_params(config)
# def freeze_relevant_params(self, config=None):
# if config is None:
# config = self.config
# if config.freeze_text_layers:
# self.freeze_text_layers(config.freeze_text_module_exceptions)
# if config.freeze_vision_layers:
# freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
# def freeze_text_layers(self, module_exceptions=[]):
# for module in [self.layers, self.norm]:
# freeze_model(module, module_exceptions=module_exceptions)
# def freeze_vision_layers(self, module_exceptions=[]):
# freeze_model(self.vision_model, module_exceptions=module_exceptions)
# def get_input_embeddings(self):
# return self.embed_tokens
# def set_input_embeddings(self, value):
# self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
).to(inputs_embeds.device)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
# @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastImage]:
device = input_ids.device if input_ids is not None else inputs_embeds.device
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
)
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError(
"You have to specify either decoder_input_ids or decoder_inputs_embeds"
)
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
elif position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_key_values_length,
seq_length + past_key_values_length,
dtype=torch.long,
device=device,
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
no_images = False
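        # Compute image hidden states once per forward: run the vision transformer on raw
        # pixel_values or reuse precomputed image_embeddings, then optionally compress the
        # sequence with the perceiver resampler.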
if image_hidden_states is None:
if pixel_values is None and image_embeddings is None:
raise ValueError(
"Either pixel_values and image_embeddings have to be not-None."
)
elif pixel_values is not None and image_embeddings is not None:
raise ValueError(
"You cannot specify both pixel_values and image_embeddings at the same time"
)
elif pixel_values is not None:
no_images = len(torch.nonzero(pixel_values)) == 0
pixel_values = pixel_values.to(
dtype=self.dtype, device=device
) # fp16 compatibility
batch_size, num_images = pixel_values.shape[:2]
pixel_values = pixel_values.contiguous().view(
batch_size * num_images, *pixel_values.shape[2:]
)
# Get sequence from the vision encoder
image_hidden_states = self.vision_model(
pixel_values=pixel_values
).last_hidden_state
elif image_embeddings is not None:
(
batch_size,
num_images,
image_seq_len,
image_hidden_size,
) = image_embeddings.size()
image_hidden_states = image_embeddings.to(
dtype=self.dtype, device=input_ids.device
)
image_hidden_states = image_hidden_states.view(
batch_size * num_images, image_seq_len, image_hidden_size
)
if self.config.use_resampler:
image_hidden_states = self.perceiver_resampler(image_hidden_states)
image_seq_len, image_hidden_size = image_hidden_states.size(
1
), image_hidden_states.size(2)
image_hidden_states = image_hidden_states.view(
batch_size, num_images * image_seq_len, image_hidden_size
)
else:
no_images = False
num_images = pixel_values.shape[1]
image_seq_len = image_hidden_states.shape[1] // num_images
# # Hack to use the model in full language modeling mode
# image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
# Make image_attention_mask compatible with hidden states
text_seq_len = image_attention_mask.size(1)
image_attention_mask = image_attention_mask.unsqueeze(-1)
image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
image_attention_mask = image_attention_mask.view(
batch_size, text_seq_len, num_images * image_seq_len
)
image_batch_size, image_sequence_length, _ = image_hidden_states.size()
image_hidden_shape = (image_batch_size, image_sequence_length)
if image_attention_mask is None:
image_attention_mask = torch.ones(image_hidden_shape, device=device)
image_attention_mask = self.invert_attention_mask(image_attention_mask)
# if list(image_attention_mask.shape) != [4, 1, 1024, 64]:
# raise ValueError(f"Image hidden_states {image_hidden_states.shape} - mask {image_attention_mask.shape} {num_images} {image_seq_len} {text_seq_len}")
# if image_hidden_states is not None:
# else:
# image_attention_mask = None
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past),
dtype=torch.bool,
device=inputs_embeds.device,
)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask,
(batch_size, seq_length),
inputs_embeds,
past_key_values_length,
)
hidden_states = inputs_embeds
# if self.gradient_checkpointing and self.training:
# if use_cache:
# logger.warning_once(
# "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
# )
# use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = (
past_key_values[idx] if past_key_values is not None else None
)
def vblock(
main_block,
hidden_states,
attention_mask,
position_ids,
past_key_value,
image_hidden_states,
image_attention_mask,
output_attentions,
use_cache,
no_images,
layer_idx,
cross_layer_interval,
gated_cross_attn_layers,
):
# TODO(ls): Add cross attention values to respective lists
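                # Every `cross_layer_interval`-th decoder layer is preceded by a gated
                # cross-attention block that lets the text tokens attend to the image features.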
if layer_idx % cross_layer_interval == 0:
xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
outputs = xblock(
hidden_states,
attention_mask=attention_mask,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
output_attentions=output_attentions,
use_cache=use_cache,
past_key_value=None, # not implemented
no_images=no_images,
)
hidden_states = outputs[0]
layer_outputs = main_block(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
return layer_outputs
# if self.gradient_checkpointing and self.training:
# past_key_value = None
# if use_cache:
# logger.warning_once(
# "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
# )
# use_cache = False
# layer_outputs = torch.utils.checkpoint.checkpoint(
# vblock,
# decoder_layer,
# hidden_states,
# attention_mask,
# position_ids,
# past_key_value,
# image_hidden_states,
# image_attention_mask,
# output_attentions,
# use_cache,
# no_images,
# idx,
# self.cross_layer_interval,
# self.gated_cross_attn_layers,
# )
# else:
layer_outputs = vblock(
decoder_layer,
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
output_attentions=output_attentions,
use_cache=use_cache,
no_images=no_images,
layer_idx=idx,
cross_layer_interval=self.cross_layer_interval,
gated_cross_attn_layers=self.gated_cross_attn_layers,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
if v is not None
)
return BaseModelOutputWithPastImage(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
image_hidden_states=image_hidden_states,
)
class IdeficsForVisionText2Text(IdeficsPreTrainedModel):
def __init__(
self,
config,
weights,
):
super().__init__(config)
self.model = IdeficsModel(
config=config,
weights=weights,
)
self.lm_head = IdeficsDecoupledTensorParallelLinear(
config=config,
weights=weights,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPastImage]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
pixel_values=pixel_values,
image_embeddings=image_embeddings,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits, speculative_logits = self.lm_head(hidden_states)
loss = None
return (
CausalLMOutputWithPastImage(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
),
speculative_logits,
)
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
unwanted_kwargs = ["token_type_ids"]
for kwarg in unwanted_kwargs:
inputs.pop(kwarg, None)
return inputs
@staticmethod
def _expand_inputs_for_generation(
*args,
**model_kwargs,
):
return expand_inputs_for_generation(*args, **model_kwargs)
@staticmethod
def _update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=False
):
return update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder
)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
| text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 28490
} | 206 |
import torch
import torch.distributed
from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer
from typing import Optional
from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_phi_modeling import (
FlashPhiForCausalLM,
PhiConfig,
)
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
tracer = trace.get_tracer(__name__)
class FlashPhi(FlashCausalLM):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
use_medusa: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = torch.float16 if dtype is None else dtype
else:
raise NotImplementedError("FlashPhi is only available on GPU")
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = PhiConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
config.quantize = quantize
config.use_medusa = use_medusa
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(filenames, device, dtype, process_group=self.process_group)
if config.quantize in ["gptq", "awq"]:
weights._set_gptq_params(model_id, revision)
model = FlashPhiForCausalLM(config, weights)
if use_medusa:
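            # Optionally wrap the lm_head with Medusa speculative-decoding heads, loaded either
            # from a local directory or from a Hugging Face Hub repo referenced by `use_medusa`.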
from text_generation_server.utils.medusa import MedusaModel
from huggingface_hub import hf_hub_download
import json
import os
from pathlib import Path
is_local_model = (
Path(use_medusa).exists() and Path(use_medusa).is_dir()
) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None
if not is_local_model:
medusa_config = hf_hub_download(
use_medusa, revision=revision, filename="config.json"
)
medusa_head = hf_hub_download(
use_medusa, revision=revision, filename="medusa_lm_head.pt"
)
else:
medusa_config = str(Path(use_medusa) / "config.json")
medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt")
with open(medusa_config, "r") as f:
config = json.load(f)
medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
weights = Weights(
[medusa_sf], device, dtype, process_group=self.process_group
)
lm_head = model.lm_head
model.lm_head = MedusaModel(config, weights, lm_head)
torch.distributed.barrier(group=self.process_group)
super(FlashPhi, self).__init__(
model=model,
tokenizer=tokenizer,
num_layers=len(model.model.layers),
num_kv_heads=model.model.num_key_value_heads,
head_size=model.model.head_size,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
| text-generation-inference/server/text_generation_server/models/flash_phi.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/flash_phi.py",
"repo_id": "text-generation-inference",
"token_count": 1738
} | 207 |
import torch
import torch.distributed
from typing import Optional, List
from transformers import AutoTokenizer, AutoModelForCausalLM
from text_generation_server.models import CausalLM
FIM_PREFIX = "<fim-prefix>"
FIM_MIDDLE = "<fim-middle>"
FIM_SUFFIX = "<fim-suffix>"
FIM_PAD = "<fim-pad>"
EOD = "<|endoftext|>"
class SantaCoder(CausalLM):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
use_medusa: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
if torch.cuda.is_available():
device = torch.device("cuda")
dtype = torch.float16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
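        # Register the fill-in-the-middle (FIM) sentinel tokens and use <|endoftext|> as the pad token.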
tokenizer.add_special_tokens(
{
"additional_special_tokens": [
EOD,
FIM_PREFIX,
FIM_MIDDLE,
FIM_SUFFIX,
FIM_PAD,
],
"pad_token": EOD,
}
)
with device:
model = AutoModelForCausalLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=dtype,
load_in_8bit=quantize == "bitsandbytes",
trust_remote_code=trust_remote_code,
)
super(CausalLM, self).__init__(
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
def decode(self, generated_ids: List[int]) -> str:
# Do not skip special tokens as they are used for custom parsing rules of the generated text
return self.tokenizer.decode(
generated_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
| text-generation-inference/server/text_generation_server/models/santacoder.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/santacoder.py",
"repo_id": "text-generation-inference",
"token_count": 1196
} | 208 |
import math
import numpy as np
import torch
import torch.nn as nn
from torch.cuda.amp import custom_bwd, custom_fwd
try:
import triton
import triton.language as tl
from . import custom_autotune
# code based https://github.com/fpgaminer/GPTQ-triton
@custom_autotune.autotune(
configs=[
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=2,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 128,
"GROUP_SIZE_M": 8,
},
num_stages=2,
num_warps=4,
),
],
key=["M", "N", "K"],
nearest_power_of_two=True,
prune_configs_by={
"early_config_prune": custom_autotune.matmul248_kernel_config_pruner,
"perf_model": None,
"top_k": None,
},
)
@triton.jit
def matmul_248_kernel(
a_ptr,
b_ptr,
c_ptr,
scales_ptr,
zeros_ptr,
g_ptr,
M,
N,
K,
bits,
maxq,
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
stride_scales,
stride_zeros,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""
Compute the matrix multiplication C = A x B.
A is of shape (M, K) float16
B is of shape (K//8, N) int32
C is of shape (M, N) float16
scales is of shape (G, N) float16
        zeros is of shape (G, N // (32 // bits)) int32, packed
g_ptr is of shape (K) int32
"""
infearure_per_bits = 32 // bits
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (
offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak
) # (BLOCK_SIZE_M, BLOCK_SIZE_K)
a_mask = offs_am[:, None] < M
# b_ptrs is set up such that it repeats elements along the K axis 8 times
b_ptrs = b_ptr + (
(offs_k[:, None] // infearure_per_bits) * stride_bk
+ offs_bn[None, :] * stride_bn
) # (BLOCK_SIZE_K, BLOCK_SIZE_N)
g_ptrs = g_ptr + offs_k
# shifter is used to extract the N bits of each element in the 32-bit word from B
scales_ptrs = scales_ptr + offs_bn[None, :]
zeros_ptrs = zeros_ptr + (offs_bn[None, :] // infearure_per_bits)
shifter = (offs_k % infearure_per_bits) * bits
zeros_shifter = (offs_bn % infearure_per_bits) * bits
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
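        # Main K loop: dequantize a (BLOCK_SIZE_K, BLOCK_SIZE_N) tile of B on the fly (unpack the
        # `bits`-bit values, subtract the zero points, multiply by the scales) and accumulate A @ B.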
for k in range(0, num_pid_k):
g_idx = tl.load(g_ptrs)
# Fetch scales and zeros; these are per-outfeature and thus reused in the inner loop
scales = tl.load(
scales_ptrs + g_idx[:, None] * stride_scales
) # (BLOCK_SIZE_K, BLOCK_SIZE_N,)
zeros = tl.load(
zeros_ptrs + g_idx[:, None] * stride_zeros
) # (BLOCK_SIZE_K, BLOCK_SIZE_N,)
zeros = (zeros >> zeros_shifter[None, :]) & maxq
            zeros = (zeros + 1) & maxq  # zero points are stored minus 1 at pack time; add it back and mask to avoid overflow
a = tl.load(a_ptrs, mask=a_mask, other=0.0) # (BLOCK_SIZE_M, BLOCK_SIZE_K)
b = tl.load(b_ptrs) # (BLOCK_SIZE_K, BLOCK_SIZE_N), but repeated
# Now we need to unpack b (which is N-bit values) into 32-bit values
b = (b >> shifter[:, None]) & maxq # Extract the N-bit values
b = (b - zeros) * scales # Scale and shift
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K
b_ptrs += (BLOCK_SIZE_K // infearure_per_bits) * stride_bk
g_ptrs += BLOCK_SIZE_K
c_ptrs = c_ptr + stride_cm * offs_am[:, None] + stride_cn * offs_bn[None, :]
c_mask = (offs_am[:, None] < M) & (offs_bn[None, :] < N)
tl.store(c_ptrs, accumulator, mask=c_mask)
except ImportError:
    print("triton not installed.")
def matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq):
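# Shape summary (inferred from the kernel launch below): input is (M, K) float16,
# qweight is the packed (K // 32 * bits, N) int32 weight, and the result is (M, N) float16.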
with torch.cuda.device(input.device):
output = torch.empty(
(input.shape[0], qweight.shape[1]), device=input.device, dtype=torch.float16
)
grid = lambda META: (
triton.cdiv(input.shape[0], META["BLOCK_SIZE_M"])
* triton.cdiv(qweight.shape[1], META["BLOCK_SIZE_N"]),
)
matmul_248_kernel[grid](
input,
qweight,
output,
scales,
qzeros,
g_idx,
input.shape[0],
qweight.shape[1],
input.shape[1],
bits,
maxq,
input.stride(0),
input.stride(1),
qweight.stride(0),
qweight.stride(1),
output.stride(0),
output.stride(1),
scales.stride(0),
qzeros.stride(0),
)
return output
class QuantLinearFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input, qweight, scales, qzeros, g_idx, bits, maxq):
output = matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq)
return output
class QuantLinear(nn.Module):
def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize):
super().__init__()
self.register_buffer("qweight", qweight)
self.register_buffer("qzeros", qzeros)
self.register_buffer("scales", scales)
self.register_buffer("g_idx", g_idx)
if bias is not None:
self.register_buffer("bias", bias)
else:
self.bias = None
if bits not in [2, 4, 8]:
raise NotImplementedError("Only 2,4,8 bits are supported.")
self.bits = bits
self.maxq = 2**self.bits - 1
self.groupsize = groupsize
self.outfeatures = qweight.shape[1]
self.infeatures = qweight.shape[0] * 32 // bits
@classmethod
def new(cls, bits, groupsize, infeatures, outfeatures, bias):
if bits not in [2, 4, 8]:
raise NotImplementedError("Only 2,4,8 bits are supported.")
qweight = torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int32)
qzeros = torch.zeros(
(math.ceil(infeatures / groupsize), outfeatures // 32 * bits),
dtype=torch.int32,
)
scales = torch.zeros(
(math.ceil(infeatures / groupsize), outfeatures), dtype=torch.float16
)
g_idx = torch.tensor(
[i // groupsize for i in range(infeatures)], dtype=torch.int32
)
if bias:
bias = torch.zeros((outfeatures), dtype=torch.float16)
else:
bias = None
return cls(qweight, qzeros, scales, g_idx, bias, bits, groupsize)
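# Rough usage flow (editor's note, derived from this class): `new()` allocates empty
# packed buffers, `pack(linear, scales, zeros, g_idx)` fills them from a float
# `nn.Linear`, and `forward(x)` then runs the packed matmul via `QuantLinearFunction`.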
def pack(self, linear, scales, zeros, g_idx=None):
self.g_idx = g_idx.clone() if g_idx is not None else self.g_idx
scales = scales.t().contiguous()
zeros = zeros.t().contiguous()
scale_zeros = zeros * scales
self.scales = scales.clone().half()
if linear.bias is not None:
self.bias = linear.bias.clone().half()
intweight = []
for idx in range(self.infeatures):
intweight.append(
torch.round(
(linear.weight.data[:, idx] + scale_zeros[self.g_idx[idx]])
/ self.scales[self.g_idx[idx]]
).to(torch.int)[:, None]
)
intweight = torch.cat(intweight, dim=1)
intweight = intweight.t().contiguous()
intweight = intweight.numpy().astype(np.uint32)
qweight = np.zeros(
(intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32
)
i = 0
row = 0
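# Pack 32 // self.bits consecutive rows of intweight into each int32 row of qweight;
# row j of the current group lands in bits [(j - i) * bits, (j - i + 1) * bits).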
while row < qweight.shape[0]:
if self.bits in [2, 4, 8]:
for j in range(i, i + (32 // self.bits)):
qweight[row] |= intweight[j] << (self.bits * (j - i))
i += 32 // self.bits
row += 1
else:
raise NotImplementedError("Only 2,4,8 bits are supported.")
qweight = qweight.astype(np.int32)
self.qweight = torch.from_numpy(qweight)
zeros -= 1
zeros = zeros.numpy().astype(np.uint32)
qzeros = np.zeros(
(zeros.shape[0], zeros.shape[1] // 32 * self.bits), dtype=np.uint32
)
i = 0
col = 0
while col < qzeros.shape[1]:
if self.bits in [2, 4, 8]:
for j in range(i, i + (32 // self.bits)):
qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i))
i += 32 // self.bits
col += 1
else:
raise NotImplementedError("Only 2,4,8 bits are supported.")
qzeros = qzeros.astype(np.int32)
self.qzeros = torch.from_numpy(qzeros)
def forward(self, x):
out_shape = x.shape[:-1] + (self.outfeatures,)
out = QuantLinearFunction.apply(
x.reshape(-1, x.shape[-1]),
self.qweight,
self.scales,
self.qzeros,
self.g_idx,
self.bits,
self.maxq,
)
out = out + self.bias if self.bias is not None else out
return out.reshape(out_shape)
| text-generation-inference/server/text_generation_server/utils/gptq/quant_linear.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/gptq/quant_linear.py",
"repo_id": "text-generation-inference",
"token_count": 7008
} | 209 |
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors or IDEs
# http://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.md]
trim_trailing_whitespace = false
| tokenizers/bindings/node/.editorconfig/0 | {
"file_path": "tokenizers/bindings/node/.editorconfig",
"repo_id": "tokenizers",
"token_count": 108
} | 210 |
/* tslint:disable */
/* eslint-disable */
/* prettier-ignore */
/* auto-generated by NAPI-RS */
const { existsSync, readFileSync } = require('fs')
const { join } = require('path')
const { platform, arch } = process
let nativeBinding = null
let localFileExisted = false
let loadError = null
function isMusl() {
// For Node 10
if (!process.report || typeof process.report.getReport !== 'function') {
try {
const lddPath = require('child_process').execSync('which ldd').toString().trim()
return readFileSync(lddPath, 'utf8').includes('musl')
} catch (e) {
return true
}
} else {
const { glibcVersionRuntime } = process.report.getReport().header
return !glibcVersionRuntime
}
}
switch (platform) {
case 'android':
switch (arch) {
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.android-arm64.node')
} else {
nativeBinding = require('tokenizers-android-arm64')
}
} catch (e) {
loadError = e
}
break
case 'arm':
localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm-eabi.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.android-arm-eabi.node')
} else {
nativeBinding = require('tokenizers-android-arm-eabi')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on Android ${arch}`)
}
break
case 'win32':
switch (arch) {
case 'x64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-x64-msvc.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.win32-x64-msvc.node')
} else {
nativeBinding = require('tokenizers-win32-x64-msvc')
}
} catch (e) {
loadError = e
}
break
case 'ia32':
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-ia32-msvc.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.win32-ia32-msvc.node')
} else {
nativeBinding = require('tokenizers-win32-ia32-msvc')
}
} catch (e) {
loadError = e
}
break
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-arm64-msvc.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.win32-arm64-msvc.node')
} else {
nativeBinding = require('tokenizers-win32-arm64-msvc')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on Windows: ${arch}`)
}
break
case 'darwin':
localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-universal.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.darwin-universal.node')
} else {
nativeBinding = require('tokenizers-darwin-universal')
}
break
} catch {}
switch (arch) {
case 'x64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-x64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.darwin-x64.node')
} else {
nativeBinding = require('tokenizers-darwin-x64')
}
} catch (e) {
loadError = e
}
break
case 'arm64':
localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-arm64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.darwin-arm64.node')
} else {
nativeBinding = require('tokenizers-darwin-arm64')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on macOS: ${arch}`)
}
break
case 'freebsd':
if (arch !== 'x64') {
throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
}
localFileExisted = existsSync(join(__dirname, 'tokenizers.freebsd-x64.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.freebsd-x64.node')
} else {
nativeBinding = require('tokenizers-freebsd-x64')
}
} catch (e) {
loadError = e
}
break
case 'linux':
switch (arch) {
case 'x64':
if (isMusl()) {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-musl.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-x64-musl.node')
} else {
nativeBinding = require('tokenizers-linux-x64-musl')
}
} catch (e) {
loadError = e
}
} else {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-gnu.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-x64-gnu.node')
} else {
nativeBinding = require('tokenizers-linux-x64-gnu')
}
} catch (e) {
loadError = e
}
}
break
case 'arm64':
if (isMusl()) {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-musl.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-arm64-musl.node')
} else {
nativeBinding = require('tokenizers-linux-arm64-musl')
}
} catch (e) {
loadError = e
}
} else {
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-gnu.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-arm64-gnu.node')
} else {
nativeBinding = require('tokenizers-linux-arm64-gnu')
}
} catch (e) {
loadError = e
}
}
break
case 'arm':
localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm-gnueabihf.node'))
try {
if (localFileExisted) {
nativeBinding = require('./tokenizers.linux-arm-gnueabihf.node')
} else {
nativeBinding = require('tokenizers-linux-arm-gnueabihf')
}
} catch (e) {
loadError = e
}
break
default:
throw new Error(`Unsupported architecture on Linux: ${arch}`)
}
break
default:
throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
}
if (!nativeBinding) {
if (loadError) {
throw loadError
}
throw new Error(`Failed to load native binding`)
}
const {
Decoder,
bpeDecoder,
byteFallbackDecoder,
ctcDecoder,
fuseDecoder,
metaspaceDecoder,
replaceDecoder,
sequenceDecoder,
stripDecoder,
wordPieceDecoder,
Encoding,
TruncationDirection,
TruncationStrategy,
Model,
BPE,
WordPiece,
WordLevel,
Unigram,
Normalizer,
prependNormalizer,
stripAccentsNormalizer,
bertNormalizer,
nfdNormalizer,
nfkdNormalizer,
nfcNormalizer,
nfkcNormalizer,
stripNormalizer,
sequenceNormalizer,
lowercase,
replace,
nmt,
precompiled,
JsSplitDelimiterBehavior,
PreTokenizer,
byteLevelPreTokenizer,
byteLevelAlphabet,
whitespacePreTokenizer,
whitespaceSplitPreTokenizer,
bertPreTokenizer,
metaspacePreTokenizer,
splitPreTokenizer,
punctuationPreTokenizer,
sequencePreTokenizer,
charDelimiterSplit,
digitsPreTokenizer,
Processor,
bertProcessing,
robertaProcessing,
byteLevelProcessing,
templateProcessing,
sequenceProcessing,
PaddingDirection,
AddedToken,
Tokenizer,
Trainer,
slice,
mergeEncodings,
} = nativeBinding
module.exports.Decoder = Decoder
module.exports.bpeDecoder = bpeDecoder
module.exports.byteFallbackDecoder = byteFallbackDecoder
module.exports.ctcDecoder = ctcDecoder
module.exports.fuseDecoder = fuseDecoder
module.exports.metaspaceDecoder = metaspaceDecoder
module.exports.replaceDecoder = replaceDecoder
module.exports.sequenceDecoder = sequenceDecoder
module.exports.stripDecoder = stripDecoder
module.exports.wordPieceDecoder = wordPieceDecoder
module.exports.Encoding = Encoding
module.exports.TruncationDirection = TruncationDirection
module.exports.TruncationStrategy = TruncationStrategy
module.exports.Model = Model
module.exports.BPE = BPE
module.exports.WordPiece = WordPiece
module.exports.WordLevel = WordLevel
module.exports.Unigram = Unigram
module.exports.Normalizer = Normalizer
module.exports.prependNormalizer = prependNormalizer
module.exports.stripAccentsNormalizer = stripAccentsNormalizer
module.exports.bertNormalizer = bertNormalizer
module.exports.nfdNormalizer = nfdNormalizer
module.exports.nfkdNormalizer = nfkdNormalizer
module.exports.nfcNormalizer = nfcNormalizer
module.exports.nfkcNormalizer = nfkcNormalizer
module.exports.stripNormalizer = stripNormalizer
module.exports.sequenceNormalizer = sequenceNormalizer
module.exports.lowercase = lowercase
module.exports.replace = replace
module.exports.nmt = nmt
module.exports.precompiled = precompiled
module.exports.JsSplitDelimiterBehavior = JsSplitDelimiterBehavior
module.exports.PreTokenizer = PreTokenizer
module.exports.byteLevelPreTokenizer = byteLevelPreTokenizer
module.exports.byteLevelAlphabet = byteLevelAlphabet
module.exports.whitespacePreTokenizer = whitespacePreTokenizer
module.exports.whitespaceSplitPreTokenizer = whitespaceSplitPreTokenizer
module.exports.bertPreTokenizer = bertPreTokenizer
module.exports.metaspacePreTokenizer = metaspacePreTokenizer
module.exports.splitPreTokenizer = splitPreTokenizer
module.exports.punctuationPreTokenizer = punctuationPreTokenizer
module.exports.sequencePreTokenizer = sequencePreTokenizer
module.exports.charDelimiterSplit = charDelimiterSplit
module.exports.digitsPreTokenizer = digitsPreTokenizer
module.exports.Processor = Processor
module.exports.bertProcessing = bertProcessing
module.exports.robertaProcessing = robertaProcessing
module.exports.byteLevelProcessing = byteLevelProcessing
module.exports.templateProcessing = templateProcessing
module.exports.sequenceProcessing = sequenceProcessing
module.exports.PaddingDirection = PaddingDirection
module.exports.AddedToken = AddedToken
module.exports.Tokenizer = Tokenizer
module.exports.Trainer = Trainer
module.exports.slice = slice
module.exports.mergeEncodings = mergeEncodings
| tokenizers/bindings/node/index.js/0 | {
"file_path": "tokenizers/bindings/node/index.js",
"repo_id": "tokenizers",
"token_count": 4683
} | 211 |
{
"name": "tokenizers-android-arm64",
"version": "0.13.4-rc1",
"os": [
"android"
],
"cpu": [
"arm64"
],
"main": "tokenizers.android-arm64.node",
"files": [
"tokenizers.android-arm64.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/android-arm64/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/android-arm64/package.json",
"repo_id": "tokenizers",
"token_count": 264
} | 212 |
{
"name": "tokenizers-linux-x64-musl",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"x64"
],
"main": "tokenizers.linux-x64-musl.node",
"files": [
"tokenizers.linux-x64-musl.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers",
"libc": [
"musl"
]
} | tokenizers/bindings/node/npm/linux-x64-musl/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-x64-musl/package.json",
"repo_id": "tokenizers",
"token_count": 291
} | 213 |
use crate::arc_rwlock_serde;
use serde::{Deserialize, Serialize};
extern crate tokenizers as tk;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tk::processors::PostProcessorWrapper;
use tk::Encoding;
#[derive(Clone, Serialize, Deserialize)]
#[napi]
pub struct Processor {
#[serde(flatten, with = "arc_rwlock_serde")]
processor: Option<Arc<RwLock<PostProcessorWrapper>>>,
}
impl tk::PostProcessor for Processor {
fn added_tokens(&self, is_pair: bool) -> usize {
self
.processor
.as_ref()
.expect("Uninitialized PostProcessor")
.read()
.unwrap()
.added_tokens(is_pair)
}
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> tk::Result<Vec<Encoding>> {
self
.processor
.as_ref()
.ok_or("Uninitialized PostProcessor")?
.read()
.unwrap()
.process_encodings(encodings, add_special_tokens)
}
}
#[napi]
pub fn bert_processing(sep: (String, u32), cls: (String, u32)) -> Result<Processor> {
Ok(Processor {
processor: Some(Arc::new(RwLock::new(
tk::processors::bert::BertProcessing::new(sep, cls).into(),
))),
})
}
#[napi]
pub fn roberta_processing(
sep: (String, u32),
cls: (String, u32),
trim_offsets: Option<bool>,
add_prefix_space: Option<bool>,
) -> Result<Processor> {
let trim_offsets = trim_offsets.unwrap_or(true);
let add_prefix_space = add_prefix_space.unwrap_or(true);
let mut processor = tk::processors::roberta::RobertaProcessing::new(sep, cls);
processor = processor.trim_offsets(trim_offsets);
processor = processor.add_prefix_space(add_prefix_space);
Ok(Processor {
processor: Some(Arc::new(RwLock::new(processor.into()))),
})
}
#[napi]
pub fn byte_level_processing(trim_offsets: Option<bool>) -> Result<Processor> {
let mut byte_level = tk::processors::byte_level::ByteLevel::default();
if let Some(trim_offsets) = trim_offsets {
byte_level = byte_level.trim_offsets(trim_offsets);
}
Ok(Processor {
processor: Some(Arc::new(RwLock::new(byte_level.into()))),
})
}
#[napi]
pub fn template_processing(
single: String,
pair: Option<String>,
special_tokens: Option<Vec<(String, u32)>>,
) -> Result<Processor> {
let special_tokens = special_tokens.unwrap_or_default();
let mut builder = tk::processors::template::TemplateProcessing::builder();
builder.try_single(single).map_err(Error::from_reason)?;
builder.special_tokens(special_tokens);
if let Some(pair) = pair {
builder.try_pair(pair).map_err(Error::from_reason)?;
}
let processor = builder
.build()
.map_err(|e| Error::from_reason(e.to_string()))?;
Ok(Processor {
processor: Some(Arc::new(RwLock::new(processor.into()))),
})
}
#[napi]
pub fn sequence_processing(processors: Vec<&Processor>) -> Processor {
let sequence: Vec<tk::PostProcessorWrapper> = processors
.into_iter()
.filter_map(|processor| {
processor
.processor
.as_ref()
.map(|processor| (**processor).read().unwrap().clone())
})
.collect();
Processor {
processor: Some(Arc::new(RwLock::new(PostProcessorWrapper::Sequence(
tk::processors::sequence::Sequence::new(sequence),
)))),
}
}
| tokenizers/bindings/node/src/processors.rs/0 | {
"file_path": "tokenizers/bindings/node/src/processors.rs",
"repo_id": "tokenizers",
"token_count": 1336
} | 214 |
<p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/py/tokenizers">
<img alt="Build" src="https://badge.fury.io/py/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
# Tokenizers
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the High-level design, you can go check it there.
Otherwise, let's dive in!
## Main features:
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignment tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
### Installation
#### With pip:
```bash
pip install tokenizers
```
#### From sources:
To use this method, you need to have Rust installed:
```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```
Once Rust is installed, you can compile by doing the following:
```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate
# Install `tokenizers` in the current virtual env
pip install -e .
```
### Load a pretrained tokenizer from the Hub
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```
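Once loaded, the tokenizer is ready to use (a quick, illustrative check):
```python
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.tokens)
```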
### Using the provided Tokenizers
We provide some pre-build tokenizers to cover the most common cases. You can easily load one of
these using some `vocab.json` and `merges.txt` files:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)
# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```
And you can train them just as simply:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
tokenizer = CharBPETokenizer()
# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])
# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")
# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```
#### Provided Tokenizers
- `CharBPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
All of these can be used and trained as explained above!
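For example, the Bert tokenizer only needs a WordPiece vocabulary file (the path below is a placeholder):
```python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("./path/to/bert-vocab.txt", lowercase=True)
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.tokens)
```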
### Build your own
Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer,
by putting all the different parts you need together.
You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE
Here is an example showing how to build your own byte-level BPE by putting all the different pieces
together, and then saving it to a single file:
```python
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
tokenizer = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
trainer = trainers.BpeTrainer(
vocab_size=20000,
min_frequency=2,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
)
tokenizer.train([
"./path/to/dataset/1.txt",
"./path/to/dataset/2.txt",
"./path/to/dataset/3.txt"
], trainer=trainer)
# And Save it
tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True)
```
Now, when you want to use this tokenizer, this is as simple as:
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json")
encoded = tokenizer.encode("I can feel the magic, can you?")
```
| tokenizers/bindings/python/README.md/0 | {
"file_path": "tokenizers/bindings/python/README.md",
"repo_id": "tokenizers",
"token_count": 1621
} | 215 |
from typing import Dict, Iterator, List, Optional, Tuple, Union
from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from ..models import BPE
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class CharBPETokenizer(BaseTokenizer):
"""Original BPE Tokenizer
Represents the BPE algorithm, as introduced by Rico Sennrich
(https://arxiv.org/abs/1508.07909)
The default settings correspond to the OpenAI GPT BPE tokenizer and differ from the original
Sennrich subword-nmt implementation by the following options, which you can deactivate:
- adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
* removing any control characters and replacing all whitespace characters with the classic space.
* handle chinese chars by putting spaces around them.
* strip all accents.
- splitting on punctuation in addition to whitespace (deactivate it with
`split_on_whitespace_only=True`)
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
suffix: str = "</w>",
dropout: Optional[float] = None,
lowercase: bool = False,
unicode_normalizer: Optional[str] = None,
bert_normalizer: bool = True,
split_on_whitespace_only: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
unk_token=str(unk_token),
end_of_word_suffix=suffix,
)
)
else:
tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if bert_normalizer:
normalizers += [BertNormalizer(lowercase=False)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
if split_on_whitespace_only:
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
else:
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
parameters = {
"model": "BPE",
"unk_token": unk_token,
"suffix": suffix,
"dropout": dropout,
"lowercase": lowercase,
"unicode_normalizer": unicode_normalizer,
"bert_normalizer": bert_normalizer,
"split_on_whitespace_only": split_on_whitespace_only,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return CharBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
| tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py",
"repo_id": "tokenizers",
"token_count": 2509
} | 216 |
[project]
name = 'tokenizers'
requires-python = '>=3.7'
authors = [
{name = 'Nicolas Patry', email = '[email protected]'},
{name = 'Anthony Moi', email = '[email protected]'}
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
keywords = ["NLP", "tokenizer", "BPE", "transformer", "deep learning"]
dynamic = [
'description',
'license',
'readme',
]
dependencies = ["huggingface_hub>=0.16.4,<1.0"]
[project.urls]
Homepage = 'https://github.com/huggingface/tokenizers'
Source = 'https://github.com/huggingface/tokenizers'
[project.optional-dependencies]
testing = ["pytest", "requests", "numpy", "datasets", "black==22.3", "ruff"]
docs = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
dev = ["tokenizers[testing]"]
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"
[tool.maturin]
python-source = "py_src"
module-name = "tokenizers.tokenizers"
bindings = 'pyo3'
features = ["pyo3/extension-module"]
[tool.black]
line-length = 119
target-version = ['py35']
[tool.ruff]
line-length = 119
target-version = "py311"
lint.ignore = [
# a == None in tests vs is None.
"E711",
# a == False in tests vs is False.
"E712",
# try.. import except.. pattern without using the lib.
"F401",
# Raw type equality is required in asserts
"E721",
# Import order
"E402",
# Fixtures unused import
"F811",
]
| tokenizers/bindings/python/pyproject.toml/0 | {
"file_path": "tokenizers/bindings/python/pyproject.toml",
"repo_id": "tokenizers",
"token_count": 711
} | 217 |
use std::sync::{Arc, RwLock};
use crate::models::PyModel;
use crate::tokenizer::PyAddedToken;
use crate::utils::PyChar;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use serde::{Deserialize, Serialize};
use tk::models::TrainerWrapper;
use tk::Trainer;
use tokenizers as tk;
/// Base class for all trainers
///
/// This class is not supposed to be instantiated directly. Instead, any implementation of a
/// Trainer will return an instance of this class when instantiated.
#[pyclass(module = "tokenizers.trainers", name = "Trainer", subclass)]
#[derive(Clone, Deserialize, Serialize)]
pub struct PyTrainer {
#[serde(flatten)]
pub trainer: Arc<RwLock<TrainerWrapper>>,
}
impl PyTrainer {
#[cfg(test)]
pub(crate) fn new(trainer: Arc<RwLock<TrainerWrapper>>) -> Self {
PyTrainer { trainer }
}
pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
let base = self.clone();
Ok(match *self.trainer.as_ref().read().unwrap() {
TrainerWrapper::BpeTrainer(_) => Py::new(py, (PyBpeTrainer {}, base))?.into_py(py),
TrainerWrapper::WordPieceTrainer(_) => {
Py::new(py, (PyWordPieceTrainer {}, base))?.into_py(py)
}
TrainerWrapper::WordLevelTrainer(_) => {
Py::new(py, (PyWordLevelTrainer {}, base))?.into_py(py)
}
TrainerWrapper::UnigramTrainer(_) => {
Py::new(py, (PyUnigramTrainer {}, base))?.into_py(py)
}
})
}
}
#[pymethods]
impl PyTrainer {
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.trainer).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle PyTrainer: {}",
e
))
})?;
Ok(PyBytes::new(py, data.as_bytes()).to_object(py))
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&PyBytes>(py) {
Ok(s) => {
let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle PyTrainer: {}",
e
))
})?;
self.trainer = unpickled;
Ok(())
}
Err(e) => Err(e),
}
}
}
impl Trainer for PyTrainer {
type Model = PyModel;
fn should_show_progress(&self) -> bool {
self.trainer.read().unwrap().should_show_progress()
}
fn train(&self, model: &mut PyModel) -> tk::Result<Vec<tk::AddedToken>> {
self.trainer
.read()
.unwrap()
.train(&mut model.model.write().unwrap())
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> tk::Result<Vec<String>> + Sync,
{
self.trainer.write().unwrap().feed(iterator, process)
}
}
impl<I> From<I> for PyTrainer
where
I: Into<TrainerWrapper>,
{
fn from(trainer: I) -> Self {
PyTrainer {
trainer: Arc::new(RwLock::new(trainer.into())),
}
}
}
macro_rules! getter {
($self: ident, $variant: ident, $($name: tt)+) => {{
let super_ = $self.as_ref();
if let TrainerWrapper::$variant(ref trainer) = *super_.trainer.read().unwrap() {
trainer.$($name)+
} else {
unreachable!()
}
}};
}
macro_rules! setter {
($self: ident, $variant: ident, $name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() {
trainer.$name = $value;
}
}};
($self: ident, $variant: ident, @$name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() {
trainer.$name($value);
}
}};
}
/// Trainer capable of training a BPE model
///
/// Args:
/// vocab_size (:obj:`int`, `optional`):
/// The size of the final vocabulary, including all tokens and alphabet.
///
/// min_frequency (:obj:`int`, `optional`):
/// The minimum frequency a pair should have in order to be merged.
///
/// show_progress (:obj:`bool`, `optional`):
/// Whether to show progress bars while training.
///
/// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
/// A list of special tokens the model should know of.
///
/// limit_alphabet (:obj:`int`, `optional`):
/// The maximum different characters to keep in the alphabet.
///
/// initial_alphabet (:obj:`List[str]`, `optional`):
/// A list of characters to include in the initial alphabet, even
/// if not seen in the training dataset.
/// If the strings contain more than one character, only the first one
/// is kept.
///
/// continuing_subword_prefix (:obj:`str`, `optional`):
/// A prefix to be used for every subword that is not a beginning-of-word.
///
/// end_of_word_suffix (:obj:`str`, `optional`):
/// A suffix to be used for every subword that is an end-of-word.
///
/// max_token_length (:obj:`int`, `optional`):
/// Prevents creating tokens longer than the specified size.
/// This can help prevent polluting your vocabulary with
/// highly repetitive tokens like `======` from Wikipedia.
///
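/// A minimal, illustrative example (the file path is a placeholder):
///
/// .. code-block:: python
///
///     from tokenizers import Tokenizer, models, trainers
///
///     tokenizer = Tokenizer(models.BPE())
///     trainer = trainers.BpeTrainer(vocab_size=20000, min_frequency=2)
///     tokenizer.train(["./path/to/data.txt"], trainer=trainer)
///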
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "BpeTrainer")]
pub struct PyBpeTrainer {}
#[pymethods]
impl PyBpeTrainer {
#[getter]
fn get_vocab_size(self_: PyRef<Self>) -> usize {
getter!(self_, BpeTrainer, vocab_size)
}
#[setter]
fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
setter!(self_, BpeTrainer, vocab_size, vocab_size);
}
#[getter]
fn get_min_frequency(self_: PyRef<Self>) -> u64 {
getter!(self_, BpeTrainer, min_frequency)
}
#[setter]
fn set_min_frequency(self_: PyRef<Self>, freq: u64) {
setter!(self_, BpeTrainer, min_frequency, freq);
}
#[getter]
fn get_show_progress(self_: PyRef<Self>) -> bool {
getter!(self_, BpeTrainer, show_progress)
}
#[setter]
fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
setter!(self_, BpeTrainer, show_progress, show_progress);
}
#[getter]
fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
getter!(
self_,
BpeTrainer,
special_tokens
.iter()
.map(|tok| tok.clone().into())
.collect()
)
}
#[setter]
fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> {
setter!(
self_,
BpeTrainer,
special_tokens,
special_tokens
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(tk::tokenizer::AddedToken::from(content, true))
} else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"Special tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?
);
Ok(())
}
#[getter]
fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> {
getter!(self_, BpeTrainer, limit_alphabet)
}
#[setter]
fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) {
setter!(self_, BpeTrainer, limit_alphabet, limit);
}
#[getter]
fn get_max_token_length(self_: PyRef<Self>) -> Option<usize> {
getter!(self_, BpeTrainer, max_token_length)
}
#[setter]
fn set_max_token_length(self_: PyRef<Self>, limit: Option<usize>) {
setter!(self_, BpeTrainer, max_token_length, limit);
}
#[getter]
fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
getter!(
self_,
BpeTrainer,
initial_alphabet.iter().map(|c| c.to_string()).collect()
)
}
#[setter]
fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) {
setter!(
self_,
BpeTrainer,
initial_alphabet,
alphabet.into_iter().map(|c| c.0).collect()
);
}
#[getter]
fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
getter!(self_, BpeTrainer, continuing_subword_prefix.clone())
}
#[setter]
fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) {
setter!(self_, BpeTrainer, continuing_subword_prefix, prefix);
}
#[getter]
fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
getter!(self_, BpeTrainer, end_of_word_suffix.clone())
}
#[setter]
fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) {
setter!(self_, BpeTrainer, end_of_word_suffix, suffix);
}
#[new]
#[pyo3(signature = (**kwargs), text_signature = None)]
pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> {
let mut builder = tk::models::bpe::BpeTrainer::builder();
if let Some(kwargs) = kwargs {
for (key, val) in kwargs {
let key: &str = key.extract()?;
match key {
"vocab_size" => builder = builder.vocab_size(val.extract()?),
"min_frequency" => builder = builder.min_frequency(val.extract()?),
"show_progress" => builder = builder.show_progress(val.extract()?),
"special_tokens" => {
builder = builder.special_tokens(
val.downcast::<PyList>()?
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(PyAddedToken::from(content, Some(true)).get_token())
} else if let Ok(mut token) =
token.extract::<PyRefMut<PyAddedToken>>()
{
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"special_tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?,
);
}
"limit_alphabet" => builder = builder.limit_alphabet(val.extract()?),
"max_token_length" => builder = builder.max_token_length(val.extract()?),
"initial_alphabet" => {
let alphabet: Vec<String> = val.extract()?;
builder = builder.initial_alphabet(
alphabet
.into_iter()
.filter_map(|s| s.chars().next())
.collect(),
);
}
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(val.extract()?)
}
"end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?),
_ => println!("Ignored unknown kwargs option {}", key),
};
}
}
Ok((PyBpeTrainer {}, builder.build().into()))
}
}
/// Trainer capable of training a WordPiece model
///
/// Args:
/// vocab_size (:obj:`int`, `optional`):
/// The size of the final vocabulary, including all tokens and alphabet.
///
/// min_frequency (:obj:`int`, `optional`):
/// The minimum frequency a pair should have in order to be merged.
///
/// show_progress (:obj:`bool`, `optional`):
/// Whether to show progress bars while training.
///
/// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
/// A list of special tokens the model should know of.
///
/// limit_alphabet (:obj:`int`, `optional`):
/// The maximum different characters to keep in the alphabet.
///
/// initial_alphabet (:obj:`List[str]`, `optional`):
/// A list of characters to include in the initial alphabet, even
/// if not seen in the training dataset.
/// If the strings contain more than one character, only the first one
/// is kept.
///
/// continuing_subword_prefix (:obj:`str`, `optional`):
/// A prefix to be used for every subword that is not a beginning-of-word.
///
/// end_of_word_suffix (:obj:`str`, `optional`):
/// A suffix to be used for every subword that is an end-of-word.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordPieceTrainer")]
pub struct PyWordPieceTrainer {}
#[pymethods]
impl PyWordPieceTrainer {
#[getter]
fn get_vocab_size(self_: PyRef<Self>) -> usize {
getter!(self_, WordPieceTrainer, vocab_size())
}
#[setter]
fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
setter!(self_, WordPieceTrainer, @set_vocab_size, vocab_size);
}
#[getter]
fn get_min_frequency(self_: PyRef<Self>) -> u64 {
getter!(self_, WordPieceTrainer, min_frequency())
}
#[setter]
fn set_min_frequency(self_: PyRef<Self>, freq: u64) {
setter!(self_, WordPieceTrainer, @set_min_frequency, freq);
}
#[getter]
fn get_show_progress(self_: PyRef<Self>) -> bool {
getter!(self_, WordPieceTrainer, show_progress())
}
#[setter]
fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
setter!(self_, WordPieceTrainer, @set_show_progress, show_progress);
}
#[getter]
fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
getter!(
self_,
WordPieceTrainer,
special_tokens()
.iter()
.map(|tok| tok.clone().into())
.collect()
)
}
#[setter]
fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> {
setter!(
self_,
WordPieceTrainer,
@set_special_tokens,
special_tokens
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(tk::tokenizer::AddedToken::from(content, true))
} else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"Special tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?
);
Ok(())
}
#[getter]
fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> {
getter!(self_, WordPieceTrainer, limit_alphabet())
}
#[setter]
fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) {
setter!(self_, WordPieceTrainer, @set_limit_alphabet, limit);
}
#[getter]
fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
getter!(
self_,
WordPieceTrainer,
initial_alphabet().iter().map(|c| c.to_string()).collect()
)
}
#[setter]
fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) {
setter!(
self_,
WordPieceTrainer,
@set_initial_alphabet,
alphabet.into_iter().map(|c| c.0).collect()
);
}
#[getter]
fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
getter!(self_, WordPieceTrainer, continuing_subword_prefix().clone())
}
#[setter]
fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) {
setter!(self_, WordPieceTrainer, @set_continuing_subword_prefix, prefix);
}
#[getter]
fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
getter!(self_, WordPieceTrainer, end_of_word_suffix().clone())
}
#[setter]
fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) {
setter!(self_, WordPieceTrainer, @set_end_of_word_suffix, suffix);
}
#[new]
#[pyo3(
signature = (** kwargs),
text_signature = "(self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet=[], continuing_subword_prefix=\"##\", end_of_word_suffix=None)"
)]
pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> {
let mut builder = tk::models::wordpiece::WordPieceTrainer::builder();
if let Some(kwargs) = kwargs {
for (key, val) in kwargs {
let key: &str = key.extract()?;
match key {
"vocab_size" => builder = builder.vocab_size(val.extract()?),
"min_frequency" => builder = builder.min_frequency(val.extract()?),
"show_progress" => builder = builder.show_progress(val.extract()?),
"special_tokens" => {
builder = builder.special_tokens(
val.downcast::<PyList>()?
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(PyAddedToken::from(content, Some(true)).get_token())
} else if let Ok(mut token) =
token.extract::<PyRefMut<PyAddedToken>>()
{
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"special_tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?,
);
}
"limit_alphabet" => builder = builder.limit_alphabet(val.extract()?),
"initial_alphabet" => {
let alphabet: Vec<String> = val.extract()?;
builder = builder.initial_alphabet(
alphabet
.into_iter()
.filter_map(|s| s.chars().next())
.collect(),
);
}
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(val.extract()?)
}
"end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?),
_ => println!("Ignored unknown kwargs option {}", key),
};
}
}
Ok((PyWordPieceTrainer {}, builder.build().into()))
}
}
/// Trainer capable of training a WordLevel model
///
/// Args:
/// vocab_size (:obj:`int`, `optional`):
/// The size of the final vocabulary, including all tokens and alphabet.
///
/// min_frequency (:obj:`int`, `optional`):
/// The minimum frequency a pair should have in order to be merged.
///
/// show_progress (:obj:`bool`, `optional`):
/// Whether to show progress bars while training.
///
/// special_tokens (:obj:`List[Union[str, AddedToken]]`):
/// A list of special tokens the model should know of.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordLevelTrainer")]
pub struct PyWordLevelTrainer {}
#[pymethods]
impl PyWordLevelTrainer {
#[getter]
fn get_vocab_size(self_: PyRef<Self>) -> usize {
getter!(self_, WordLevelTrainer, vocab_size)
}
#[setter]
fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
setter!(self_, WordLevelTrainer, vocab_size, vocab_size);
}
#[getter]
fn get_min_frequency(self_: PyRef<Self>) -> u64 {
getter!(self_, WordLevelTrainer, min_frequency)
}
#[setter]
fn set_min_frequency(self_: PyRef<Self>, freq: u64) {
setter!(self_, WordLevelTrainer, min_frequency, freq);
}
#[getter]
fn get_show_progress(self_: PyRef<Self>) -> bool {
getter!(self_, WordLevelTrainer, show_progress)
}
#[setter]
fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
setter!(self_, WordLevelTrainer, show_progress, show_progress);
}
#[getter]
fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
getter!(
self_,
WordLevelTrainer,
special_tokens
.iter()
.map(|tok| tok.clone().into())
.collect()
)
}
#[setter]
fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> {
setter!(
self_,
WordLevelTrainer,
special_tokens,
special_tokens
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(tk::tokenizer::AddedToken::from(content, true))
} else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"Special tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?
);
Ok(())
}
#[new]
#[pyo3(signature = (**kwargs), text_signature = None)]
pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> {
let mut builder = tk::models::wordlevel::WordLevelTrainer::builder();
if let Some(kwargs) = kwargs {
for (key, val) in kwargs {
let key: &str = key.extract()?;
match key {
"vocab_size" => {
builder.vocab_size(val.extract()?);
}
"min_frequency" => {
builder.min_frequency(val.extract()?);
}
"show_progress" => {
builder.show_progress(val.extract()?);
}
"special_tokens" => {
builder.special_tokens(
val.downcast::<PyList>()?
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(PyAddedToken::from(content, Some(true)).get_token())
} else if let Ok(mut token) =
token.extract::<PyRefMut<PyAddedToken>>()
{
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"special_tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?,
);
}
_ => println!("Ignored unknown kwargs option {}", key),
}
}
}
Ok((
PyWordLevelTrainer {},
builder
.build()
.expect("WordLevelTrainerBuilder cannot fail")
.into(),
))
}
}
/// Trainer capable of training a Unigram model
///
/// Args:
/// vocab_size (:obj:`int`):
/// The size of the final vocabulary, including all tokens and alphabet.
///
/// show_progress (:obj:`bool`):
/// Whether to show progress bars while training.
///
/// special_tokens (:obj:`List[Union[str, AddedToken]]`):
/// A list of special tokens the model should know of.
///
/// initial_alphabet (:obj:`List[str]`):
/// A list of characters to include in the initial alphabet, even
/// if not seen in the training dataset.
/// If the strings contain more than one character, only the first one
/// is kept.
///
/// shrinking_factor (:obj:`float`):
/// The shrinking factor used at each step of the training to prune the
/// vocabulary.
///
/// unk_token (:obj:`str`):
/// The token used for out-of-vocabulary tokens.
///
/// max_piece_length (:obj:`int`):
/// The maximum length of a given token.
///
/// n_sub_iterations (:obj:`int`):
/// The number of iterations of the EM algorithm to perform before
/// pruning the vocabulary.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "UnigramTrainer")]
pub struct PyUnigramTrainer {}
#[pymethods]
impl PyUnigramTrainer {
#[getter]
fn get_vocab_size(self_: PyRef<Self>) -> u32 {
getter!(self_, UnigramTrainer, vocab_size)
}
#[setter]
fn set_vocab_size(self_: PyRef<Self>, vocab_size: u32) {
setter!(self_, UnigramTrainer, vocab_size, vocab_size);
}
#[getter]
fn get_show_progress(self_: PyRef<Self>) -> bool {
getter!(self_, UnigramTrainer, show_progress)
}
#[setter]
fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
setter!(self_, UnigramTrainer, show_progress, show_progress);
}
#[getter]
fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
getter!(
self_,
UnigramTrainer,
special_tokens
.iter()
.map(|tok| tok.clone().into())
.collect()
)
}
#[setter]
fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> {
setter!(
self_,
UnigramTrainer,
special_tokens,
special_tokens
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(tk::tokenizer::AddedToken::from(content, true))
} else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"Special tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?
);
Ok(())
}
#[getter]
fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
getter!(
self_,
UnigramTrainer,
initial_alphabet.iter().map(|c| c.to_string()).collect()
)
}
#[setter]
fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) {
setter!(
self_,
UnigramTrainer,
initial_alphabet,
alphabet.into_iter().map(|c| c.0).collect()
);
}
#[new]
#[pyo3(
signature = (**kwargs),
text_signature = "(self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2)"
)]
pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> {
let mut builder = tk::models::unigram::UnigramTrainer::builder();
if let Some(kwargs) = kwargs {
for (key, val) in kwargs {
let key: &str = key.extract()?;
match key {
"vocab_size" => builder.vocab_size(val.extract()?),
"show_progress" => builder.show_progress(val.extract()?),
"n_sub_iterations" => builder.n_sub_iterations(val.extract()?),
"shrinking_factor" => builder.shrinking_factor(val.extract()?),
"unk_token" => builder.unk_token(val.extract()?),
"max_piece_length" => builder.max_piece_length(val.extract()?),
"seed_size" => builder.seed_size(val.extract()?),
"initial_alphabet" => {
let alphabet: Vec<String> = val.extract()?;
builder.initial_alphabet(
alphabet
.into_iter()
.filter_map(|s| s.chars().next())
.collect(),
)
}
"special_tokens" => builder.special_tokens(
val.downcast::<PyList>()?
.into_iter()
.map(|token| {
if let Ok(content) = token.extract::<String>() {
Ok(PyAddedToken::from(content, Some(true)).get_token())
} else if let Ok(mut token) =
token.extract::<PyRefMut<PyAddedToken>>()
{
token.special = true;
Ok(token.get_token())
} else {
Err(exceptions::PyTypeError::new_err(
"special_tokens must be a List[Union[str, AddedToken]]",
))
}
})
.collect::<PyResult<Vec<_>>>()?,
),
_ => {
println!("Ignored unknown kwargs option {}", key);
&mut builder
}
};
}
}
let trainer: tokenizers::models::unigram::UnigramTrainer =
builder.build().map_err(|e| {
exceptions::PyException::new_err(format!("Cannot build UnigramTrainer: {}", e))
})?;
Ok((PyUnigramTrainer {}, trainer.into()))
}
}
/// Trainers Module
#[pymodule]
pub fn trainers(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<PyTrainer>()?;
m.add_class::<PyBpeTrainer>()?;
m.add_class::<PyWordPieceTrainer>()?;
m.add_class::<PyWordLevelTrainer>()?;
m.add_class::<PyUnigramTrainer>()?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use tk::models::bpe::trainer::BpeTrainer;
#[test]
fn get_subtype() {
Python::with_gil(|py| {
let py_trainer = PyTrainer::new(Arc::new(RwLock::new(BpeTrainer::default().into())));
let py_bpe = py_trainer.get_as_subtype(py).unwrap();
assert_eq!("BpeTrainer", py_bpe.as_ref(py).get_type().name().unwrap());
})
}
}
| tokenizers/bindings/python/src/trainers.rs/0 | {
"file_path": "tokenizers/bindings/python/src/trainers.rs",
"repo_id": "tokenizers",
"token_count": 17617
} | 218 |
import pickle
import numpy as np
import pytest
from tokenizers import AddedToken, Encoding, Tokenizer
from tokenizers.implementations import BertWordPieceTokenizer
from tokenizers.models import BPE, Model, Unigram
from tokenizers.pre_tokenizers import ByteLevel
from tokenizers.processors import RobertaProcessing
from ..utils import bert_files, data_dir, multiprocessing_with_parallelism, roberta_files
class TestAddedToken:
def test_instantiate_with_content_only(self):
added_token = AddedToken("<mask>")
added_token.content = "<MASK>"
assert added_token.content == "<MASK>"
assert type(added_token) == AddedToken
added_token.content = added_token.content.lower()
assert added_token.special == False
added_token.special = True
assert added_token.special == True
added_token.special = False
assert str(added_token) == "<mask>"
assert (
repr(added_token)
== 'AddedToken("<mask>", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False)'
)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == True
assert isinstance(pickle.loads(pickle.dumps(added_token)), AddedToken)
def test_can_set_rstrip(self):
added_token = AddedToken("<mask>", rstrip=True)
assert added_token.rstrip == True
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == True
def test_can_set_lstrip(self):
added_token = AddedToken("<mask>", lstrip=True)
assert added_token.rstrip == False
assert added_token.lstrip == True
assert added_token.single_word == False
assert added_token.normalized == True
def test_can_set_single_word(self):
added_token = AddedToken("<mask>", single_word=True)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == True
assert added_token.normalized == True
def test_can_set_normalized(self):
added_token = AddedToken("<mask>", normalized=False)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == False
class TestTokenizer:
def test_has_expected_type_and_methods(self):
tokenizer = Tokenizer(BPE())
assert type(tokenizer) == Tokenizer
assert callable(tokenizer.num_special_tokens_to_add)
assert callable(tokenizer.get_vocab)
assert callable(tokenizer.get_vocab_size)
assert callable(tokenizer.enable_truncation)
assert callable(tokenizer.no_truncation)
assert callable(tokenizer.enable_padding)
assert callable(tokenizer.no_padding)
assert callable(tokenizer.encode)
assert callable(tokenizer.encode_batch)
assert callable(tokenizer.decode)
assert callable(tokenizer.decode_batch)
assert callable(tokenizer.token_to_id)
assert callable(tokenizer.id_to_token)
assert callable(tokenizer.add_tokens)
assert callable(tokenizer.add_special_tokens)
assert callable(tokenizer.train)
assert callable(tokenizer.post_process)
assert isinstance(tokenizer.model, Model)
assert tokenizer.normalizer is None
assert tokenizer.pre_tokenizer is None
assert tokenizer.post_processor is None
assert tokenizer.decoder is None
assert isinstance(pickle.loads(pickle.dumps(Tokenizer(BPE()))), Tokenizer)
def test_add_tokens(self):
tokenizer = Tokenizer(BPE())
added = tokenizer.add_tokens(["my", "name", "is", "john"])
assert added == 4
tokens = [AddedToken("the"), AddedToken("quick", normalized=False), AddedToken()]
assert tokens[0].normalized == True
added = tokenizer.add_tokens(tokens)
assert added == 2
assert tokens[0].normalized == True
assert tokens[1].normalized == False
def test_add_special_tokens(self):
tokenizer = Tokenizer(BPE())
# Can add special tokens as `str`
added = tokenizer.add_special_tokens(["my", "name", "is", "john"])
assert added == 4
# Can add special tokens as `AddedToken`
tokens = [AddedToken("the"), AddedToken("quick", normalized=True), AddedToken()]
assert tokens[0].normalized == True
added = tokenizer.add_special_tokens(tokens)
assert added == 2
assert tokens[0].normalized == False
assert tokens[1].normalized == True
def test_encode(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can encode single sequence
output = tokenizer.encode("my name is john")
assert output.tokens == ["my", "name", "is", "john"]
assert type(output.ids) == list
assert type(output.type_ids) == list
assert type(output.offsets) == list
with pytest.warns(DeprecationWarning):
assert type(output.words) == list
assert type(output.word_ids) == list
assert type(output.special_tokens_mask) == list
assert type(output.attention_mask) == list
assert type(output.overflowing) == list
# Can encode a pair of sequences
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["my", "name", "is", "john", "pair"]
assert isinstance(pickle.loads(pickle.dumps(output)), Encoding)
# Can encode a single pre-tokenized sequence
output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
assert output.tokens == ["my", "name", "is", "john"]
# Can encode a batch with both a single sequence and a pair of sequences
output = tokenizer.encode_batch(["my name is john", ("my name is john", "pair")])
assert len(output) == 2
def test_encode_formats(self, bert_files):
with pytest.deprecated_call():
tokenizer = BertWordPieceTokenizer(bert_files["vocab"])
# Encode
output = tokenizer.encode("my name is john")
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"]
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"]
output = tokenizer.encode(["my", "name", "is", "john"], ["pair"], is_pretokenized=True)
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
# Encode batch
result_single = [
["[CLS]", "my", "name", "is", "john", "[SEP]"],
["[CLS]", "my", "name", "is", "georges", "[SEP]"],
]
result_pair = [
["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"],
["[CLS]", "my", "name", "is", "georges", "[SEP]", "pair", "[SEP]"],
]
def format(encodings):
return [e.tokens for e in encodings]
def test_single(input, is_pretokenized=False):
output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized)
assert format(output) == result_single
def test_pair(input, is_pretokenized=False):
output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized)
assert format(output) == result_pair
# Classic inputs
# Lists
test_single(["My name is John", "My name is Georges"])
test_pair([("my name is john", "pair"), ("my name is georges", "pair")])
test_pair([["my name is john", "pair"], ["my name is georges", "pair"]])
# Tuples
test_single(("My name is John", "My name is Georges"))
test_pair((("My name is John", "pair"), ("My name is Georges", "pair")))
# Numpy
test_single(np.array(["My name is John", "My name is Georges"]))
test_pair(np.array([("My name is John", "pair"), ("My name is Georges", "pair")]))
test_pair(np.array([["My name is John", "pair"], ["My name is Georges", "pair"]]))
# PreTokenized inputs
# Lists
test_single([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]], True)
test_pair(
[
(["My", "name", "is", "John"], ["pair"]),
(["My", "name", "is", "Georges"], ["pair"]),
],
True,
)
test_pair(
[
[["My", "name", "is", "John"], ["pair"]],
[["My", "name", "is", "Georges"], ["pair"]],
],
True,
)
# Tuples
test_single((("My", "name", "is", "John"), ("My", "name", "is", "Georges")), True)
test_pair(
(
(("My", "name", "is", "John"), ("pair",)),
(("My", "name", "is", "Georges"), ("pair",)),
),
True,
)
test_pair(
(
(["My", "name", "is", "John"], ["pair"]),
(["My", "name", "is", "Georges"], ["pair"]),
),
True,
)
# Numpy
test_single(
np.array([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]]),
True,
)
test_single(
np.array((("My", "name", "is", "John"), ("My", "name", "is", "Georges"))),
True,
)
test_pair(
np.array(
[
[["My", "name", "is", "John"], ["pair"]],
[["My", "name", "is", "Georges"], ["pair"]],
],
dtype=object,
),
True,
)
test_pair(
np.array(
(
(("My", "name", "is", "John"), ("pair",)),
(("My", "name", "is", "Georges"), ("pair",)),
),
dtype=object,
),
True,
)
# Mal formed
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode([["my", "name"]])
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode("My name is john", [["pair"]])
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode("my name is john", ["pair"])
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode("My name is john", is_pretokenized=True)
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode("My name is john", ["pair"], is_pretokenized=True)
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode(["My", "name", "is", "John"], "pair", is_pretokenized=True)
def test_encode_add_special_tokens(self, roberta_files):
with pytest.deprecated_call():
tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"]))
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True)
tokenizer.post_processor = RobertaProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),
)
# Can encode with special tokens
output_with_specials = tokenizer.encode("My name is John", add_special_tokens=True)
assert output_with_specials.tokens == ["<s>", "ĠMy", "Ġname", "Ġis", "ĠJohn", "</s>"]
# Can encode without special tokens
output_without_specials = tokenizer.encode("My name is John", add_special_tokens=False)
assert output_without_specials.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
def test_truncation(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.enable_truncation(2)
# Can truncate single sequences
output = tokenizer.encode("my name is john")
assert output.tokens == ["my", "name"]
# Can truncate pair sequences as well
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["my", "pair"]
# Can get the params and give them to enable_truncation
trunc = tokenizer.truncation
tokenizer.enable_truncation(**trunc)
# Left truncation direction
tokenizer.enable_truncation(2, direction="left")
output = tokenizer.encode("my name is john")
assert output.tokens == ["is", "john"]
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["john", "pair"]
def test_padding(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# By default it does nothing when encoding single sequence
tokenizer.enable_padding()
output = tokenizer.encode("my name")
assert output.tokens == ["my", "name"]
# Can pad to the longest in a batch
output = tokenizer.encode_batch(["my name", "my name is john"])
assert all([len(encoding) == 4 for encoding in output])
# Can pad to the specified length otherwise
tokenizer.enable_padding(length=4)
output = tokenizer.encode("my name")
assert output.tokens == ["my", "name", "[PAD]", "[PAD]"]
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["my", "name", "pair", "[PAD]"]
# Can get the params and give them to enable_padding
padding = tokenizer.padding
tokenizer.enable_padding(**padding)
def test_decode(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can decode single sequences
output = tokenizer.decode([0, 1, 2, 3])
assert output == "my name is john"
# Can decode batch
output = tokenizer.decode_batch([[0, 1, 2, 3], [4]])
assert output == ["my name is john", "pair"]
def test_get_vocab(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can retrieve vocab with added tokens
vocab = tokenizer.get_vocab(with_added_tokens=True)
assert vocab == {"is": 2, "john": 3, "my": 0, "name": 1, "pair": 4}
# Can retrieve vocab without added tokens
vocab = tokenizer.get_vocab(with_added_tokens=False)
assert vocab == {}
# Can retrieve added token decoder
vocab = tokenizer.get_added_tokens_decoder()
assert vocab == {
0: AddedToken("my", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
1: AddedToken("name", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
2: AddedToken("is", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
3: AddedToken("john", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
4: AddedToken("pair", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
}
def test_get_vocab_size(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can retrieve vocab's size with added tokens
size = tokenizer.get_vocab_size(with_added_tokens=True)
assert size == 5
# Can retrieve vocab's size without added tokens
size = tokenizer.get_vocab_size(with_added_tokens=False)
assert size == 0
def test_post_process(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.enable_truncation(2)
tokenizer.enable_padding(length=4)
encoding = tokenizer.encode("my name is john")
pair_encoding = tokenizer.encode("pair")
# Can post process a single encoding
output = tokenizer.post_process(encoding)
assert output.tokens == ["my", "name", "[PAD]", "[PAD]"]
# Can post process a pair of encodings
output = tokenizer.post_process(encoding, pair_encoding)
assert output.tokens == ["my", "pair", "[PAD]", "[PAD]"]
def test_multiprocessing_with_parallelism(self):
tokenizer = Tokenizer(BPE())
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_from_pretrained(self):
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["Hey", "there", "dear", "friend", "!"]
def test_from_pretrained_revision(self):
tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["hey", "there", "dear", "friend", "!"]
tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test", revision="gpt-2")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]
def test_unigram_byte_fallback(self):
vocab = [
("<unk>", 0.0),
("A", -0.01),
("sen", -0.02),
("te", -0.03),
("n", -0.04),
("ce", -0.05),
("<0xF0>", -0.06),
("<0x9F>", -0.06),
("<0xA4>", -0.06),
("<0x97>", -0.06),
(" ", -0.4),
]
        tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False))
output = tokenizer.encode("A sentence 🤗")
assert output.ids == [1, 10, 2, 3, 4, 5, 10, 0]
assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "🤗"]
tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True))
output = tokenizer.encode("A sentence 🤗")
assert output.ids == [1, 10, 2, 3, 4, 5, 10, 6, 7, 8, 9]
assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "<0xF0>", "<0x9F>", "<0xA4>", "<0x97>"]
def test_encode_special_tokens(self):
tokenizer = Tokenizer.from_pretrained("t5-base")
tokenizer.add_tokens(["<eot>"])
tokenizer.add_special_tokens(["<end_of_text>"])
output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
assert output.tokens == ["▁Hey", "▁there", "<end_of_text>", "▁dear", "<eot>", "▁friend", "!"]
tokenizer.encode_special_tokens = True
assert tokenizer.encode_special_tokens == True
output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
assert output.tokens == [
"▁Hey",
"▁there",
"<",
"end",
"_",
"of",
"_",
"text",
">",
"▁dear",
"<eot>",
"▁friend",
"!",
]
tokenizer.add_tokens(["of_text>"])
output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
assert output.tokens == ["▁Hey", "▁there", "<", "end", "_", "of_text>", "▁dear", "<eot>", "▁friend", "!"]
| tokenizers/bindings/python/tests/bindings/test_tokenizer.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_tokenizer.py",
"repo_id": "tokenizers",
"token_count": 8966
} | 219 |
- sections:
- local: index
title: 🤗 Tokenizers
- local: quicktour
title: Quicktour
- local: installation
title: Installation
- local: pipeline
title: The tokenization pipeline
- local: components
title: Components
- local: training_from_memory
title: Training from memory
title: Getting started
- sections:
- local: api/input-sequences
title: Input Sequences
- local: api/encode-inputs
title: Encode Inputs
- local: api/tokenizer
title: Tokenizer
- local: api/encoding
title: Encoding
- local: api/added-tokens
title: Added Tokens
- local: api/models
title: Models
- local: api/normalizers
title: Normalizers
- local: api/pre-tokenizers
title: Pre-tokenizers
- local: api/post-processors
title: Post-processors
- local: api/trainers
title: Trainers
- local: api/decoders
title: Decoders
- local: api/visualizer
title: Visualizer
title: API
| tokenizers/docs/source-doc-builder/_toctree.yml/0 | {
"file_path": "tokenizers/docs/source-doc-builder/_toctree.yml",
"repo_id": "tokenizers",
"token_count": 338
} | 220 |
# The tokenization pipeline
When calling `Tokenizer.encode` or
`Tokenizer.encode_batch`, the input
text(s) go through the following pipeline:
- `normalization`
- `pre-tokenization`
- `model`
- `post-processing`
We'll see in detail what happens during each of those steps,
as well as when you want to `decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you
to customize each of those steps to your needs. If you're already
familiar with those steps and want to learn by seeing some code, jump to
`our BERT from scratch example <example>`.
For the examples that require a `Tokenizer` we will use the tokenizer we trained in the
`quicktour`, which you can load with:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_reload_tokenizer",
"end-before": "END pipeline_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Normalization
Normalization is, in a nutshell, a set of operations you apply to a raw
string to make it less random or "cleaner". Common operations include
stripping whitespace, removing accented characters or lowercasing all
text. If you're familiar with [Unicode
normalization](https://unicode.org/reports/tr15), it is also a very
common normalization operation applied in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library
by a `Normalizer`, and you can combine
several of those by using a `normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_normalizer",
"end-before": "END pipeline_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can manually test that normalizer by applying it to any string:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_normalizer",
"end-before": "END pipeline_test_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
When building a `Tokenizer`, you can
customize its normalizer by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_normalizer",
"end-before": "END pipeline_replace_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
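
If you just want a quick inline illustration without opening the included snippets, here is a minimal Python sketch of those three steps (building the normalizer, testing it on a string, and attaching it to a tokenizer); the sample string and printed output are illustrative assumptions:

```python
from tokenizers import Tokenizer, normalizers
from tokenizers.models import BPE
from tokenizers.normalizers import NFD, StripAccents

# Compose NFD Unicode normalization with accent removal
normalizer = normalizers.Sequence([NFD(), StripAccents()])

# Try it on a raw string
print(normalizer.normalize_str("Héllò hôw are ü?"))  # -> "Hello how are u?"

# Customize a Tokenizer by replacing the corresponding attribute
tokenizer = Tokenizer(BPE())
tokenizer.normalizer = normalizer
```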
Of course, if you change the way a tokenizer applies normalization, you
should probably retrain it from scratch afterward.
## Pre-Tokenization
Pre-tokenization is the act of splitting a text into smaller objects
that give an upper bound to what your tokens will be at the end of
training. A good way to think of this is that the pre-tokenizer will
split your text into "words" and then, your final tokens will be parts
of those words.
An easy way to pre-tokenize inputs is to split on spaces and
punctuations, which is done by the
`pre_tokenizers.Whitespace`
pre-tokenizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_pre_tokenizer",
"end-before": "END pipeline_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The output is a list of tuples, with each tuple containing one word and
its span in the original sentence (which is used to determine the final
`offsets` of our `Encoding`). Note that splitting on
punctuation will split contractions like `"I'm"` in this example.
You can combine any `PreTokenizer`s together. For instance, here is a pre-tokenizer that will
split on space, punctuation and digits, separating numbers into their
individual digits:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_combine_pre_tokenizer",
"end-before": "END pipeline_combine_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
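
As a hedged Python sketch of such a combination (the sample sentence and the printed splits are illustrative assumptions):

```python
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits, Whitespace

# Split on whitespace/punctuation first, then break numbers into single digits
pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])

# Each piece comes back with its (start, end) offsets in the original string
print(pre_tokenizer.pre_tokenize_str("Call 911!"))
# e.g. [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]
```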
As we saw in the `quicktour`, you can
customize the pre-tokenizer of a `Tokenizer` by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_pre_tokenizer",
"end-before": "END pipeline_replace_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the pre-tokenizer, you should probably
retrain your tokenizer from scratch afterward.
## Model
Once the input texts are normalized and pre-tokenized, the
`Tokenizer` applies the model on the
pre-tokens. This is the part of the pipeline that needs training on your
corpus (or that has been trained if you are using a pretrained
tokenizer).
The role of the model is to split your "words" into tokens, using the
rules it has learned. It's also responsible for mapping those tokens to
their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the
`Tokenizer` so you already know how to
customize this part. Currently, the 🤗 Tokenizers library supports:
- `models.BPE`
- `models.Unigram`
- `models.WordLevel`
- `models.WordPiece`
For more details about each model and its behavior, you can check
[here](components#models).
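
For instance, here is a minimal sketch of picking a model when creating a `Tokenizer` (the `"[UNK]"` value is just a common convention, not a requirement):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE, WordPiece

# The model is chosen when the Tokenizer is created, and is then either trained
# on a corpus or loaded with an existing vocabulary.
bpe_tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
wordpiece_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
```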
## Post-Processing
Post-processing is the last step of the tokenization pipeline; it
performs any additional transformation on the
`Encoding` before it's returned, like
adding potential special tokens.
As we saw in the quick tour, we can customize the post processor of a
`Tokenizer` by setting the
corresponding attribute. For instance, here is how we can post-process
to make the inputs suitable for the BERT model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_processor",
"end-before": "END pipeline_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
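
In Python this relies on `TemplateProcessing`; here is a minimal sketch (the token IDs `1` and `2` are assumptions and should come from the actual vocabulary):

```python
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.processors import TemplateProcessing

tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
# In a real setup, look the IDs up with tokenizer.token_to_id("[CLS]") / ("[SEP]")
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
```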
Note that, contrary to the pre-tokenizer or the normalizer, you don't
need to retrain a tokenizer after changing its post-processor.
## All together: a BERT tokenizer from scratch
Let's put all those pieces together to build a BERT tokenizer. First,
BERT relies on WordPiece, so we instantiate a new
`Tokenizer` with this model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Then we know that BERT preprocesses texts by removing accents and
lowercasing, so we also use a unicode normalizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The pre-tokenizer is just splitting on whitespace and punctuation:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
And the post-processing uses the template we saw in the previous
section:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can use this tokenizer and train it on wikitext like in the
`quicktour`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
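
A hedged Python sketch of that training step, assuming `bert_tokenizer` is the tokenizer assembled above and that the wikitext raw files sit under the paths shown (adjust them to your own data):

```python
from tokenizers.trainers import WordPieceTrainer

trainer = WordPieceTrainer(
    vocab_size=30522,
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
bert_tokenizer.train(files, trainer)
bert_tokenizer.save("bert-wordpiece.json")
```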
## Decoding
On top of encoding the input texts, a `Tokenizer` also has an API for decoding, that is, converting IDs
generated by your model back to text. This is done by the methods
`Tokenizer.decode` (for one predicted text) and `Tokenizer.decode_batch` (for a batch of predictions).
The `decoder` will first convert the IDs back to tokens
(using the tokenizer's vocabulary) and remove all special tokens, then
join those tokens with spaces:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_decoding",
"end-before": "END pipeline_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
If you used a model that added special characters to represent subtokens
of a given "word" (like the `"##"` in
WordPiece), you will need to customize the `decoder` to treat
them properly. If we take our previous `bert_tokenizer`, for instance, the
default decoding will give:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
But by changing it to a proper decoder, we get:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
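
As a rough Python sketch (reloading the tokenizer saved earlier to keep it self-contained; the decoded strings are illustrative):

```python
from tokenizers import Tokenizer, decoders

bert_tokenizer = Tokenizer.from_file("bert-wordpiece.json")

output = bert_tokenizer.encode("Welcome to the Tokenizers library.")
print(bert_tokenizer.decode(output.ids))
# default decoding leaves the "##" markers in, e.g. "welcome to the tok ##eni ##zer ##s library ."

# A WordPiece decoder merges the subtokens back into full words
bert_tokenizer.decoder = decoders.WordPiece()
print(bert_tokenizer.decode(output.ids))
# e.g. "welcome to the tokenizers library."
```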
| tokenizers/docs/source-doc-builder/pipeline.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/pipeline.mdx",
"repo_id": "tokenizers",
"token_count": 5903
} | 221 |
Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Rust API Reference is available directly on the `Docs.rs <https://docs.rs/tokenizers>`__
website.
| tokenizers/docs/source/api/rust.inc/0 | {
"file_path": "tokenizers/docs/source/api/rust.inc",
"repo_id": "tokenizers",
"token_count": 43
} | 222 |
language: node_js
node_js: "10"
script:
- ./node_modules/.bin/webpack
| tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml",
"repo_id": "tokenizers",
"token_count": 30
} | 223 |
use crate::decoders::DecoderWrapper;
use crate::tokenizer::{Decoder, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
decoders: Vec<DecoderWrapper>,
}
impl Sequence {
pub fn new(decoders: Vec<DecoderWrapper>) -> Self {
Self { decoders }
}
}
impl Decoder for Sequence {
fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> {
for decoder in &self.decoders {
tokens = decoder.decode_chain(tokens)?;
}
Ok(tokens)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::decoders::ctc::CTC;
use crate::pre_tokenizers::metaspace::Metaspace;
#[test]
fn sequence_basic() {
let decoders = vec![
DecoderWrapper::CTC(CTC::default()),
DecoderWrapper::Metaspace(Metaspace::default()),
];
let decoder = Sequence::new(decoders);
let tokens: Vec<String> = vec!["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]
.into_iter()
.map(|s| s.to_string())
.collect();
let out_tokens = decoder.decode(tokens).unwrap();
assert_eq!(out_tokens, "Hi you");
}
}
| tokenizers/tokenizers/src/decoders/sequence.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/sequence.rs",
"repo_id": "tokenizers",
"token_count": 600
} | 224 |
use super::OrderedVocabIter;
use crate::tokenizer::{Model, Result, Token};
use serde_json::Value;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::{Path, PathBuf};
mod serialization;
mod trainer;
// Re-export
pub use trainer::*;
type Vocab = HashMap<String, u32>;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("WordLevel error: Missing [UNK] token from the vocabulary")]
MissingUnkToken,
#[error("Bad vocabulary json file")]
BadVocabulary,
}
struct Config {
files: Option<String>,
vocab: HashMap<String, u32>,
unk_token: String,
}
/// A `WordLevelBuilder` can be used to create a `WordLevel`
/// model with a custom configuration.
pub struct WordLevelBuilder {
config: Config,
}
impl Default for WordLevelBuilder {
fn default() -> Self {
Self {
config: Config {
files: None,
vocab: HashMap::new(),
unk_token: String::from("<unk>"),
},
}
}
}
impl WordLevelBuilder {
/// Construct a new `WordLevelBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Set the input files.
#[must_use]
pub fn files(mut self, vocab: String) -> Self {
self.config.files = Some(vocab);
self
}
/// Set the vocab (token -> ID) mapping.
#[must_use]
pub fn vocab(mut self, vocab: HashMap<String, u32>) -> Self {
self.config.vocab = vocab;
self
}
    /// Set the `UNK` token for the vocab.
#[must_use]
pub fn unk_token(mut self, unk_token: String) -> Self {
self.config.unk_token = unk_token;
self
}
    /// Constructs a `WordLevel` model that uses the `WordLevelBuilder`'s configuration.
pub fn build(mut self) -> Result<WordLevel> {
if let Some(vocab) = self.config.files {
self.config.vocab = WordLevel::read_file(&vocab)?;
}
let vocab_r = self
.config
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
Ok(WordLevel {
vocab: self.config.vocab,
vocab_r,
unk_token: self.config.unk_token,
})
}
}
#[derive(PartialEq, Clone, Eq)]
pub struct WordLevel {
vocab: HashMap<String, u32>,
vocab_r: HashMap<u32, String>,
pub unk_token: String,
}
impl std::fmt::Debug for WordLevel {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("WordLevel")
.field("unk_token", &self.unk_token)
.field("vocab", &self.vocab.len())
.finish()
}
}
impl WordLevel {
pub fn builder() -> WordLevelBuilder {
WordLevelBuilder::new()
}
pub fn read_file(vocab_path: &str) -> Result<Vocab> {
let vocab_file = File::open(vocab_path)?;
let mut vocab_file = BufReader::new(vocab_file);
let mut buffer = String::new();
let mut vocab = HashMap::new();
vocab_file.read_to_string(&mut buffer)?;
let json: Value = serde_json::from_str(&buffer)?;
match json {
Value::Object(m) => {
for (token, id) in m {
if let Value::Number(id) = id {
let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32;
vocab.insert(token, id);
}
}
}
_ => return Err(Box::new(Error::BadVocabulary)),
};
Ok(vocab)
}
    /// Initialize a WordLevel model from a vocab file.
pub fn from_file(vocab_path: &str, unk_token: String) -> Result<WordLevel> {
let vocab = WordLevel::read_file(vocab_path)?;
Self::builder().vocab(vocab).unk_token(unk_token).build()
}
}
impl Default for WordLevel {
fn default() -> Self {
Self {
vocab: HashMap::new(),
vocab_r: HashMap::new(),
unk_token: String::from("<unk>"),
}
}
}
impl Model for WordLevel {
type Trainer = WordLevelTrainer;
fn tokenize(&self, token: &str) -> Result<Vec<Token>> {
if let Some(&id) = self.vocab.get(token) {
Ok(vec![Token {
id,
value: token.to_owned(),
offsets: (0, token.len()),
}])
} else if let Some(&unk_id) = self.vocab.get(&self.unk_token) {
Ok(vec![Token {
id: unk_id,
value: self.unk_token.to_owned(),
offsets: (0, token.len()),
}])
} else {
Err(Box::new(Error::MissingUnkToken))
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.keys().len()
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let vocab_file_name = match name {
Some(name) => format!("{}-vocab.json", name),
None => "vocab.json".to_string(),
};
// Write vocab.json
let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())]
.iter()
.collect();
let mut vocab_file = File::create(&vocab_path)?;
let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter)?;
vocab_file.write_all(serialized.as_bytes())?;
Ok(vec![vocab_path])
}
fn get_trainer(&self) -> Self::Trainer {
WordLevelTrainer::default()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tokenize_unk() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let wordlevel = WordLevelBuilder::default()
.vocab(vocab)
.unk_token("<unk>".to_string())
.build()
.unwrap();
let tokens = wordlevel.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = wordlevel.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "a".into(), (0, 1)),]);
}
#[test]
fn test_tokenize_missing_unk_token() {
let vocab: Vocab = [("a".into(), 0), ("b".into(), 1)].iter().cloned().collect();
let wordlevel = WordLevelBuilder::default().vocab(vocab).build().unwrap();
let tokens = wordlevel.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "a".into(), (0, 1)),]);
let error = wordlevel.tokenize("c").err().unwrap();
assert!(error.is::<Error>());
}
}
| tokenizers/tokenizers/src/models/wordlevel/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordlevel/mod.rs",
"repo_id": "tokenizers",
"token_count": 3383
} | 225 |
use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct CharDelimiterSplit {
pub delimiter: char,
}
impl CharDelimiterSplit {
pub fn new(delimiter: char) -> Self {
Self { delimiter }
}
}
impl PreTokenizer for CharDelimiterSplit {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
// TODO: Maybe add the option to specify the behavior
pretokenized.split(|_, normalized| {
normalized.split(self.delimiter, SplitDelimiterBehavior::Removed)
})
}
}
| tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs",
"repo_id": "tokenizers",
"token_count": 296
} | 226 |
use super::{
normalizer::Range, Model, NormalizedString, Normalizer, Offsets, PreTokenizedString, Token,
};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind};
use regex::Regex;
use serde::{ser::SerializeSeq, Deserialize, Serialize, Serializer};
use std::collections::{HashMap, HashSet};
/// Represent a token added by the user on top of the existing Model vocabulary.
/// AddedToken can be configured to specify the behavior they should have in various situations
/// like:
/// - Whether they should only match single words
/// - Whether to include any whitespace on its left or right
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct AddedToken {
/// The content of the added token
pub content: String,
/// Whether this token must be a single word or can break words
pub single_word: bool,
/// Whether this token should strip whitespaces on its left
pub lstrip: bool,
/// Whether this token should strip whitespaces on its right
pub rstrip: bool,
/// Whether this token should be normalized
pub normalized: bool,
/// Whether this token is special
pub special: bool,
}
impl AddedToken {
    /// Build this token from the given content, specifying if it is intended to be a
/// special token. Special tokens are not normalized by default.
pub fn from<S: Into<String>>(content: S, special: bool) -> Self {
Self {
content: content.into(),
normalized: !special,
special,
..Default::default()
}
}
/// Specify whether this token should only match on whole single words, and never
/// part of a word.
#[must_use]
pub fn single_word(mut self, single_word: bool) -> Self {
self.single_word = single_word;
self
}
/// Specify whether this token should include all the whitespaces on its left, in
/// order to strip them out.
#[must_use]
pub fn lstrip(mut self, lstrip: bool) -> Self {
self.lstrip = lstrip;
self
}
/// Specify whether this token should include all the whitespaces on its right, in
/// order to strip them out.
#[must_use]
pub fn rstrip(mut self, rstrip: bool) -> Self {
self.rstrip = rstrip;
self
}
/// Specify whether this token should be normalized and match against its normalized
/// version in the input text.
#[must_use]
pub fn normalized(mut self, normalized: bool) -> Self {
self.normalized = normalized;
self
}
/// Specify whether this token is special, meaning if it should be skipped when decoding
#[must_use]
pub fn special(mut self, special: bool) -> Self {
self.special = special;
self
}
}
impl Default for AddedToken {
fn default() -> Self {
Self {
content: String::new(),
single_word: false,
lstrip: false,
rstrip: false,
normalized: true,
special: false,
}
}
}
// AddedTokens can be updated if value changed
impl std::hash::Hash for AddedToken {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.content.hash(state);
}
}
type MatchingSet = (AhoCorasick, Vec<u32>);
lazy_static! {
static ref STARTS_WITH_WORD: Regex = Regex::new(r"^\w").unwrap();
static ref ENDS_WITH_WORD: Regex = Regex::new(r"\w$").unwrap();
static ref RIGHTMOST_SPACE_AT_START: Regex = Regex::new(r"^\s*").unwrap();
static ref LEFTMOST_SPACE_AT_END: Regex = Regex::new(r"\s*$").unwrap();
}
fn ends_with_word(sentence: &str) -> bool {
ENDS_WITH_WORD.is_match(sentence)
}
fn starts_with_word(sentence: &str) -> bool {
STARTS_WITH_WORD.is_match(sentence)
}
fn space_leftmost_at_end(sentence: &str) -> usize {
if let Some(match_) = LEFTMOST_SPACE_AT_END.find(sentence) {
match_.start()
} else {
sentence.len()
}
}
fn space_rightmost_at_start(sentence: &str) -> usize {
if let Some(match_) = RIGHTMOST_SPACE_AT_START.find(sentence) {
match_.end()
} else {
0
}
}
///
/// A vocabulary built on top of the Model
///
/// This provides a way to add new vocabulary to a Tokenizer that has already been trained,
/// in a previous process, maybe by someone else. This is especially interesting in the case
/// of fine-tunings, where we want to finetune a model while adding some new functionalities
/// using some new special tokens, or maybe add some tokens in the case of unknown tokens, etc.
///
/// One of the reasons we need to handle these tokens outside of the model is simply that
/// for many models, it is not possible to add new tokens after the training process. For example,
/// using BPE, the training process generates merges pairs along the vocabulary, and any token
/// in the vocabulary can be decomposed in other tokens, down to the original alphabet. If we
/// were to add new tokens after this training process, we couldn't make sure the merges pairs
/// exist as required.
///
#[derive(Clone, Debug)]
pub(super) struct AddedVocabulary {
/// Contains the mapping from String (token content) to ID. This map contains both special
    /// tokens and classic added tokens that were added to this vocabulary.
added_tokens_map: HashMap<String, u32>,
/// Contains the mapping from ID to AddedToken for all the added tokens, both special
/// and classic.
added_tokens_map_r: HashMap<u32, AddedToken>,
/// Contains only the classic AddedToken, in the specific order the user gave them.
added_tokens: Vec<AddedToken>,
/// Contains only the special AddedToken, in the specific order the user gave them.
special_tokens: Vec<AddedToken>,
    /// A Set, containing all the special tokens for easy access while decoding. This lets
/// us remove them easily with an O(1) complexity.
special_tokens_set: HashSet<String>,
/// A RegexSet containing all the non-normalized patterns used to split on AddedTokens
split_trie: MatchingSet,
/// A RegexSet containing all the normalized patterns used to split on AddedTokens
split_normalized_trie: MatchingSet,
    /// Whether or not special tokens should be split when encoding. This is equivalent to ignoring them
encode_special_tokens: bool,
}
impl AddedVocabulary {
pub fn new() -> Self {
let trie = AhoCorasickBuilder::new()
.match_kind(MatchKind::LeftmostLongest)
.build::<_, &&[u8]>([])
.expect("The trie should build correctly");
let normalized_trie = AhoCorasickBuilder::new()
.match_kind(MatchKind::LeftmostLongest)
.build::<_, &&[u8]>([])
.expect("The normalized trie should build correctly");
Self {
added_tokens_map: HashMap::new(),
added_tokens_map_r: HashMap::new(),
added_tokens: vec![],
special_tokens: vec![],
special_tokens_set: HashSet::new(),
split_trie: (trie, vec![]),
split_normalized_trie: (normalized_trie, vec![]),
encode_special_tokens: false,
}
}
/// Size of the additional vocabulary
#[allow(dead_code)] // Suppress the "method is never used" warning
pub fn len(&self) -> usize {
self.added_tokens_map.len()
}
/// Get the additional vocabulary
pub fn get_vocab(&self) -> &HashMap<String, u32> {
&self.added_tokens_map
}
/// Get the additional vocabulary with the AddedTokens
pub fn get_added_tokens_decoder(&self) -> &HashMap<u32, AddedToken> {
&self.added_tokens_map_r
}
/// Get the id matching one of our token if it exists
pub fn token_to_id(&self, token: &str, model: &impl Model) -> Option<u32> {
self.added_tokens_map
.get(token)
.copied()
.or_else(|| model.token_to_id(token))
}
/// Get the token matching the given id if it exists
pub fn id_to_token(&self, id: u32, model: &impl Model) -> Option<String> {
self.added_tokens_map_r
.get(&id)
.map(|t| t.content.clone())
.or_else(|| model.id_to_token(id))
}
//
pub fn set_encode_special_tokens(&mut self, value: bool) {
self.encode_special_tokens = value;
}
pub fn get_encode_special_tokens(&self) -> bool {
self.encode_special_tokens
}
/// Check if a token is a special token
pub fn is_special_token(&self, token: &str) -> bool {
self.special_tokens_set.contains(token)
}
/// Add some special tokens to the vocabulary
pub fn add_special_tokens<N: Normalizer>(
&mut self,
tokens: &[AddedToken],
model: &impl Model,
normalizer: Option<&N>,
) -> usize {
self.add_tokens(tokens, model, normalizer)
}
/// Add some tokens to the vocabulary
pub fn add_tokens<N: Normalizer>(
&mut self,
tokens: &[AddedToken],
model: &impl Model,
normalizer: Option<&N>,
) -> usize {
// Handle special tokens (if any)
for token in tokens {
if token.special
&& !token.content.is_empty()
&& !self.special_tokens_set.contains(&token.content)
{
self.special_tokens.push(token.to_owned());
self.special_tokens_set.insert(token.content.clone());
}
}
// Then we delegate to `add_tokens`, that will take care of refreshing added tokens too.
let mut ignored = 0;
for token in tokens {
if token.content.is_empty() || self.added_tokens_map_r.values().any(|val| val == token)
{
ignored += 1;
continue;
}
// If a token is already part of the vocabulary, we mark it as added
let new_id = if let Some(new_id) = self.token_to_id(&token.content, model) {
new_id
} else {
self.added_tokens_map.values().cloned().max().map_or(
model.get_vocab_size() as u32,
|max| {
if (max >= model.get_vocab_size() as u32) || model.get_vocab_size() == 0 {
max + 1
} else {
model.get_vocab_size() as u32
}
},
)
};
// Make sure we modify the previous entry
self.added_tokens_map
.entry(token.content.clone())
.and_modify(|old_id| *old_id = new_id)
.or_insert_with(|| new_id);
// Update the current revert operation
self.added_tokens_map_r
.entry(new_id)
.and_modify(|t| *t = token.clone())
.or_insert_with(|| token.clone());
// Make sure to remove previous entry (if the token gets a new id)
// Finally add the token to the classic set if special
if !self.special_tokens_set.contains(&token.content) {
self.added_tokens.push(token.clone());
}
}
self.refresh_added_tokens(model, normalizer);
// Return the number of added tokens
tokens.len() - ignored
}
/// Reconstruct our internal RegexSet when new tokens are added to the vocabulary.
///
/// We keep two different RegexSet, one that will take care of matching against the
/// non-normalized string, and one matching against the normalized one.
fn refresh_added_tokens<N: Normalizer>(&mut self, model: &impl Model, normalizer: Option<&N>) {
type TupleTokenId<'a> = (&'a AddedToken, u32);
let (normalized, non_normalized): (Vec<TupleTokenId>, Vec<TupleTokenId>) = self
.special_tokens
.iter()
.chain(self.added_tokens.iter())
.map(|token| {
(
token,
self.token_to_id(&token.content, model)
.expect("Missing additional token"),
)
})
.partition(|(token, _)| token.normalized);
let (tokens, ids): (Vec<&AddedToken>, Vec<u32>) = non_normalized.into_iter().unzip();
let trie = AhoCorasickBuilder::new()
.match_kind(MatchKind::LeftmostLongest)
.build(tokens.iter().map(|token| &token.content))
.expect("Failed to build tried when refreshing tokens");
self.split_trie = (trie, ids);
let (ntokens, nids): (Vec<&AddedToken>, Vec<u32>) = normalized.into_iter().unzip();
let patterns: Vec<_> = ntokens
.iter()
.map(|token| {
let mut content = NormalizedString::from(token.content.as_ref());
if let Some(n) = normalizer {
n.normalize(&mut content).unwrap();
}
content
})
.collect();
let normalized_trie = AhoCorasickBuilder::new()
.match_kind(MatchKind::LeftmostLongest)
.build(patterns.iter().map(|content| content.get()))
.expect("Failed to build tried when refreshing tokens (normalized)");
self.split_normalized_trie = (normalized_trie, nids);
}
/// Find any AddedToken in the given sentence, using the provided MatchingSet.
/// This method returns a list "splits", each of them being a pair of Offsets
/// and an optional ID if it is an AddedToken.
/// The list of splits cover the entire input string.
fn find_matches(&self, sentence: &str, split_re: &MatchingSet) -> Vec<(Option<u32>, Offsets)> {
if sentence.is_empty() {
return vec![(None, (0, 0))];
}
let mut start_offset = 0;
let mut splits = vec![];
for mat in split_re.0.find_iter(sentence) {
let mut start = mat.start();
let mut stop = mat.end();
let aho_id = mat.pattern();
let id = split_re.1[aho_id];
let added_token = &self.added_tokens_map_r.get(&id).unwrap();
if self.encode_special_tokens && self.special_tokens_set.contains(&added_token.content)
{
continue;
}
if added_token.single_word {
let start_space = start == 0 || !ends_with_word(&sentence[..start]);
let stop_space = stop == sentence.len() || !starts_with_word(&sentence[stop..]);
if !stop_space || !start_space {
// Discard not single word
continue;
}
}
if added_token.lstrip {
                // This will be strictly smaller than `start` and stay within the correct sentence offsets
let newstart = space_leftmost_at_end(&sentence[..start]);
// The previous match could have already matched those spaces
// Ignore them if it's already matched
start = std::cmp::max(newstart, start_offset);
}
if added_token.rstrip {
                // This will start at the character right after `stop`, so we need
// to add the previous stop value
stop += space_rightmost_at_start(&sentence[stop..])
}
if start_offset < start {
splits.push((None, (start_offset, start)));
}
splits.push((Some(id), (start, stop)));
start_offset = stop;
}
let total_byte_len = sentence.len();
if start_offset != total_byte_len {
splits.push((None, (start_offset, total_byte_len)));
}
splits
}
/// Split the input sentence to extract anything we found from the `MatchingSet`, as well as
/// the list of corresponding IDs
    /// The list of IDs has the exact same number of elements as the Iterator.
fn split_with_indices(
&self,
sentence: NormalizedString,
split_re: &MatchingSet,
) -> Vec<(NormalizedString, Option<Vec<Token>>)> {
self.find_matches(sentence.get(), split_re)
.into_iter()
.map(|(id, byte_offsets)| {
let slice = sentence
.slice(Range::Normalized(byte_offsets.0..byte_offsets.1))
.expect("AddedVocabulary bad split");
if let Some(id) = id {
let value = slice.get().to_owned();
let len = value.len();
(slice, Some(vec![Token::new(id, value, (0, len))]))
} else {
(slice, None)
}
})
.collect()
}
/// Extract the additional vocabulary from the given sentence, normalizing it along the way.
///
/// Some tokens should match against their normalized representation, as well as the
/// non-normalized one. For example, when we expect to extract the token `yesterday` in the
/// input sentence `I read a book Yesterday`, if the normalizer is supposed to lowercase
/// everything, we expect a match.
pub fn extract_and_normalize<N: Normalizer>(
&self,
normalizer: Option<&N>,
sequence: &str,
) -> PreTokenizedString {
let mut pretokenized: PreTokenizedString = sequence.into();
// 1. We extract all the non-normalized tokens from the non-normalized string
pretokenized
.split(|_, sequence| Ok(self.split_with_indices(sequence, &self.split_trie)))
.expect("AddedVocabulary bad split");
// <s> normalized = False
// "I read a book <s>Hey" -> "I read a book", " <s>", "Hey"
// </s> normalized = True -> "▁</s>"
// "I read a book</s>Hey" -> "I read a book</s>Hey"
// Day normalized = True -> "Day"
// "I read a book monday" -> "I read a book monday"
// [DAY] normalized = False -> "Day"
// "I read a [DAY] monday" -> "I read a " "[DAY]", "book monday"
// 320055
// 2. Then extract the normalized tokens from the normalized pieces of the string
pretokenized
.split(|_, mut sequence| {
normalizer.map(|n| n.normalize(&mut sequence));
Ok(self.split_with_indices(sequence, &self.split_normalized_trie))
})
.expect("AddedVocabulary bad split");
// ["I read a book", " <s>", "Hey"] -> ["▁I read a book", "▁ <s>", "▁Hey"]
// ["▁I read a book", "▁ <s>", "▁Hey"] -> [.., "▁ ", "<s>", "▁Hey"]
// </s> normalized = True -> "▁</s>"
// "I read a book</s>Hey" -> ["▁I read a book", "<","/","s",">", "Hey"]
// "I read a " "[DAY]", "book monday" -> "i read a " "[day]", "book monday"
pretokenized
}
}
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct AddedTokenWithId {
/// The id assigned to this token
pub id: u32,
#[serde(flatten)]
/// The target AddedToken
pub token: AddedToken,
}
impl Serialize for AddedVocabulary {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut added_tokens = self
.added_tokens_map_r
.iter()
.map(|(id, token)| AddedTokenWithId {
id: *id,
token: token.clone(),
})
.collect::<Vec<_>>();
// We need to have these added tokens ordered by ascending ID
added_tokens.sort_unstable_by_key(|o| o.id);
let mut vocabulary = serializer.serialize_seq(Some(added_tokens.len()))?;
for token in added_tokens {
vocabulary.serialize_element(&token)?;
}
vocabulary.end()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::normalizers::utils::Lowercase;
use crate::normalizers::NormalizerWrapper;
use crate::{OffsetReferential, OffsetType, Result, Token, Trainer};
use std::path::{Path, PathBuf};
#[derive(Serialize, Deserialize)]
struct ModelMock {
vocab: HashMap<String, u32>,
vocab_r: HashMap<u32, String>,
}
impl ModelMock {
pub fn new<I>(iter: I) -> Self
where
I: IntoIterator<Item = &'static (&'static str, u32)>,
{
let vocab: HashMap<String, u32> = iter
.into_iter()
.map(|&(tok, id)| (tok.to_string(), id))
.collect();
Self {
vocab_r: vocab
.iter()
.map(|(tok, id)| (*id, tok.to_owned()))
.collect(),
vocab,
}
}
}
fn simplify_output(result: &'_ PreTokenizedString) -> Vec<(&'_ str, Option<Vec<u32>>)> {
result
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, _, tokens)| {
(
s,
tokens
.as_ref()
.map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()),
)
})
.collect::<Vec<_>>()
}
struct TrainerMock;
impl Trainer for TrainerMock {
type Model = ModelMock;
fn should_show_progress(&self) -> bool {
true
}
fn train(&self, _model: &mut ModelMock) -> Result<Vec<AddedToken>> {
unimplemented!()
}
fn feed<I, S, F>(&mut self, _iterator: I, _process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
unimplemented!()
}
}
impl Model for ModelMock {
type Trainer = TrainerMock;
fn tokenize(&self, _sequence: &str) -> Result<Vec<Token>> {
unimplemented!()
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.len()
}
fn save(&self, _folder: &Path, _name: Option<&str>) -> Result<Vec<PathBuf>> {
unimplemented!()
}
fn get_trainer(&self) -> Self::Trainer {
TrainerMock
}
}
#[test]
fn can_add_tokens() {
let model = ModelMock::new(&[("test", 0), ("tost", 1)]);
let mut vocab = AddedVocabulary::new();
let normalizer: Option<&NormalizerWrapper> = None;
// Add tokens normally
assert_eq!(
vocab.add_tokens(
&[AddedToken::from("added_token_1", false)],
&model,
normalizer
),
1
);
let vocab_len: usize = vocab.len();
assert_eq!(vocab_len, 1);
        // Does not add the same token multiple times
assert_eq!(
vocab.add_tokens(
&[
AddedToken::from("added_token_2", false),
AddedToken::from("added_token_2", false)
],
&model,
normalizer
),
1
);
assert_eq!(vocab.len(), 2);
// Also adds tokens already covered by the model
let added_token = AddedToken::from("test", false);
assert_eq!(
vocab.add_tokens(&[added_token.clone()], &model, normalizer),
1
);
assert_eq!(vocab.len(), 3);
assert_eq!(vocab.get_added_tokens_decoder()[&0], added_token);
}
#[test]
fn can_add_special_tokens() {
let model = ModelMock::new(&[("test", 0), ("tost", 1)]);
let mut vocab = AddedVocabulary::new();
let normalizer: Option<&NormalizerWrapper> = None;
// Add tokens normally
assert_eq!(
vocab.add_special_tokens(
&[AddedToken::from("added_token_1", true)],
&model,
normalizer
),
1
);
assert_eq!(vocab.len(), 1);
        // Does not add the same token multiple times
assert_eq!(
vocab.add_special_tokens(
&[
AddedToken::from("added_token_2", true),
AddedToken::from("added_token_2", true)
],
&model,
normalizer
),
1
);
assert_eq!(vocab.len(), 2);
// Can add tokens already covered by the model
assert_eq!(
vocab.add_special_tokens(&[AddedToken::from("test", true)], &model, normalizer),
1
);
assert_eq!(vocab.len(), 3); // New token was added
assert!(vocab.is_special_token("test"));
assert_eq!(
*vocab.get_added_tokens_decoder(),
HashMap::from([
(0, AddedToken::from("test", true)),
(2, AddedToken::from("added_token_1", true)),
(3, AddedToken::from("added_token_2", true)),
])
);
assert!(vocab.added_tokens_map.contains_key("test"));
assert!(vocab.added_tokens_map_r.contains_key(&0));
vocab.add_tokens(
&[
AddedToken::from("tost", true),
AddedToken::from("another_two", false),
],
&model,
normalizer,
);
assert_eq!(vocab.len(), 5); // New token was added
assert_eq!(vocab.get_vocab()["another_two"], 4); // New token was added, but the index is not the length of the vocab
// Let's add an already added token again
assert_eq!(
vocab.add_special_tokens(&[AddedToken::from("another_two", true)], &model, normalizer),
1
);
assert_eq!(vocab.len(), 5); // Token was already there
assert_eq!(vocab.get_vocab()["another_two"], 4); // Token idx not changed
// Just checking that we can set the content of the string in rust
let mut token: AddedToken = AddedToken::from("Hey", false);
token.content = "hey".to_string();
        assert_eq!(token.content, "hey"); // Content was updated
        token.special = true;
        assert!(token.special); // Special flag was updated
}
#[test]
fn can_extract_added_tokens() {
// Is able to extract both normal and special tokens
let model = ModelMock::new(&[]);
let mut vocab = AddedVocabulary::new();
let normalizer: Option<&NormalizerWrapper> = None;
vocab.add_tokens(
&[
AddedToken::from("my", false),
AddedToken::from("name", false),
],
&model,
normalizer,
);
vocab.add_special_tokens(
&[
AddedToken::from("[CLS]", true),
AddedToken::from("[SEP]", true),
],
&model,
normalizer,
);
let result = vocab.extract_and_normalize(normalizer, "[CLS] My name is Anthony [SEP]");
assert_eq!(
result
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, _, tokens)| (
s,
tokens
.as_ref()
.map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>())
))
.collect::<Vec<_>>(),
vec![
("[CLS]", Some(vec![2])),
(" My ", None),
("name", Some(vec![1])),
(" is Anthony ", None),
("[SEP]", Some(vec![3]))
]
);
}
#[test]
fn options_use_cases() {
// Is able to extract both normal and special tokens, with various options (lstrip, rstrip,
// single_word, normalized)
let model = ModelMock::new(&[]);
let normalizer = Lowercase;
let mut vocab = AddedVocabulary::new();
vocab.add_tokens(
&[
AddedToken::from("my", false).lstrip(true).rstrip(true),
AddedToken::from("name", false),
AddedToken::from("ony", false).single_word(true),
],
&model,
Some(&normalizer),
);
vocab.add_special_tokens(
&[
AddedToken::from("[CLS]", true),
AddedToken::from("[SEP]", true),
],
&model,
Some(&normalizer),
);
let result =
vocab.extract_and_normalize(Some(&normalizer), "[CLS] My name is Anthony [SEP]");
assert_eq!(
simplify_output(&result),
vec![
("[CLS]", Some(vec![3])),
// This one includes both spaces because of the lstrip & rstrip
// And it matches because normalized == true
(" my ", Some(vec![0])),
("name", Some(vec![1])),
// `ony` is not extracted here thanks to single_word
(" is anthony ", None),
("[SEP]", Some(vec![4])),
]
);
}
#[test]
fn empty_matches() {
let vocab = AddedVocabulary::new();
let matches = vocab.find_matches("", &vocab.split_trie);
assert_eq!(matches, vec![(None, (0, 0))]);
}
#[test]
fn test_single_word_is_correct() {
// Is able to extract both normal and special tokens, with various options (lstrip, rstrip,
// single_word, normalized)
let model = ModelMock::new(&[]);
let mut vocab = AddedVocabulary::new();
let normalizer = Lowercase;
vocab.add_tokens(
&[AddedToken::from("<mask>", false).single_word(true)],
&model,
Some(&normalizer),
);
        // Left, in the middle, non single word left, non single word right, end of sentence valid
let result = vocab.extract_and_normalize(
Some(&normalizer),
"<mask> My name <mask> A<mask> <mask>ony <mask>",
);
assert_eq!(
simplify_output(&result),
vec![
("<mask>", Some(vec![0])),
(" my name ", None),
("<mask>", Some(vec![0])),
(" a<mask> <mask>ony ", None),
("<mask>", Some(vec![0]))
]
);
}
#[test]
fn test_single_word_is_unicode_correct() {
let model = ModelMock::new(&[]);
let mut vocab = AddedVocabulary::new();
let normalizer = Lowercase;
assert_eq!(vocab.len(), 0);
vocab.add_tokens(
&[AddedToken::from("<mask>", false).single_word(true)],
&model,
Some(&normalizer),
);
let result = vocab.extract_and_normalize(Some(&normalizer), "<mask>, <mask>- ◌̰<mask>");
assert_eq!(
simplify_output(&result),
vec![
                // Punctuation is not a word character
("<mask>", Some(vec![0])),
(", ", None),
                // A dash is not a word character
("<mask>", Some(vec![0])),
                // This is a Unicode combining mark, which counts as a word character: https://en.wikipedia.org/wiki/Combining_Diacritical_Marks
("- ◌̰<mask>", None),
]
);
}
#[test]
fn test_lstrip_unicode_space() {
let model = ModelMock::new(&[]);
let mut vocab = AddedVocabulary::new();
let normalizer = Lowercase;
vocab.add_tokens(
&[AddedToken::from("<mask>", false)
.lstrip(true)
.rstrip(true)
.single_word(true)],
&model,
Some(&normalizer),
);
let result = vocab
.extract_and_normalize(Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000}");
assert_eq!(
simplify_output(&result),
vec![
("hi", None),
// Regular space
(" <mask> ", Some(vec![0])),
("there", None),
// \t is a spacing character
("\t<mask>\t", Some(vec![0])),
// Non overlapping
                // \u{2000} is EN QUAD, a Unicode space character: https://jkorpela.fi/chars/spaces.html
("<mask>\u{2000}", Some(vec![0])),
]
);
}
#[test]
fn test_encode_special_tokens() {
let model = ModelMock::new(&[]);
let mut vocab = AddedVocabulary::new();
let normalizer = Lowercase;
vocab.add_tokens(
&[
AddedToken::from("<mask>", true)
.lstrip(true)
.rstrip(true)
.single_word(true),
AddedToken::from("ask>", false),
AddedToken::from("<pad>", true),
],
&model,
Some(&normalizer),
);
vocab.set_encode_special_tokens(true);
let result = vocab.extract_and_normalize(
Some(&normalizer),
"Hi <mask> there\t<mask>\t<mask>\u{2000} <pad> <mask><pad><pad>",
);
assert_eq!(
simplify_output(&result),
vec![
("hi <m", None),
("ask>", Some(vec![1])),
(" there\t<m", None),
("ask>", Some(vec![1])),
("\t<m", None),
("ask>", Some(vec![1])),
("\u{2000} <pad> <m", None),
("ask>", Some(vec![1])),
("<pad><pad>", None)
]
);
vocab.set_encode_special_tokens(false);
let result = vocab.extract_and_normalize(
Some(&normalizer),
"Hi <mask> there\t<mask>\t<mask>\u{2000} <pad> <mask><pad><pad>",
);
assert_eq!(
simplify_output(&result),
vec![
("hi", None),
(" <mask> ", Some(vec![0])),
("there", None),
("\t<mask>\t", Some(vec![0])),
("<mask>\u{2000} ", Some(vec![0])),
("<pad>", Some(vec![2])),
(" <mask>", Some(vec![0])),
("<pad>", Some(vec![2])),
("<pad>", Some(vec![2]))
]
);
}
}
| tokenizers/tokenizers/src/tokenizer/added_vocabulary.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/added_vocabulary.rs",
"repo_id": "tokenizers",
"token_count": 16897
} | 227 |
use crate::tokenizer::{Encoding, Result};
use serde::{Deserialize, Serialize};
use std::cmp;
use std::mem;
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, Default)]
pub enum TruncationDirection {
Left,
#[default]
Right,
}
impl std::convert::AsRef<str> for TruncationDirection {
fn as_ref(&self) -> &str {
match self {
TruncationDirection::Left => "left",
TruncationDirection::Right => "right",
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TruncationParams {
#[serde(default)]
pub direction: TruncationDirection,
pub max_length: usize,
pub strategy: TruncationStrategy,
pub stride: usize,
}
impl Default for TruncationParams {
fn default() -> Self {
Self {
max_length: 512,
strategy: TruncationStrategy::default(),
stride: 0,
direction: TruncationDirection::default(),
}
}
}
#[derive(thiserror::Error, Debug)]
pub enum TruncationError {
/// We are supposed to truncate the pair sequence, but it has not been provided.
#[error("Truncation error: Second sequence not provided")]
SecondSequenceNotProvided,
/// We cannot truncate the target sequence enough to respect the provided max length.
#[error("Truncation error: Sequence to truncate too short to respect the provided max_length")]
SequenceTooShort,
}
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)]
pub enum TruncationStrategy {
LongestFirst,
OnlyFirst,
OnlySecond,
}
impl Default for TruncationStrategy {
fn default() -> Self {
Self::LongestFirst
}
}
impl std::convert::AsRef<str> for TruncationStrategy {
fn as_ref(&self) -> &str {
match self {
Self::LongestFirst => "longest_first",
Self::OnlyFirst => "only_first",
Self::OnlySecond => "only_second",
}
}
}
pub fn truncate_encodings(
mut encoding: Encoding,
mut pair_encoding: Option<Encoding>,
params: &TruncationParams,
) -> Result<(Encoding, Option<Encoding>)> {
if params.max_length == 0 {
encoding.truncate(0, params.stride, params.direction);
if let Some(other_encoding) = pair_encoding.as_mut() {
other_encoding.truncate(0, params.stride, params.direction);
}
return Ok((encoding, pair_encoding));
}
let total_length = encoding.get_ids().len()
+ pair_encoding
.as_ref()
.map(|e| e.get_ids().len())
.unwrap_or(0);
let to_remove = if total_length > params.max_length {
total_length - params.max_length
} else {
return Ok((encoding, pair_encoding));
};
match params.strategy {
TruncationStrategy::LongestFirst => {
if let Some(other_encoding) = pair_encoding.as_mut() {
// Assuming n1 <= n2, there are 3 cases
// Case 1:
// No truncation needs to be performed.
// This scenario is handled before the match.
// Case 2:
// Only the longer input needs to be truncated.
// n1 = n1
// n2 = max_length - n1
// Case 3:
// Both inputs must be truncated.
// n1 = max_length / 2
// n2 = n1 + max_length % 2
let mut n1 = encoding.get_ids().len();
let mut n2 = other_encoding.get_ids().len();
let mut swap = false;
// Ensure n1 is the length of the shortest input
if n1 > n2 {
swap = true;
mem::swap(&mut n1, &mut n2);
}
if n1 > params.max_length {
// This needs to be a special case
// to avoid max_length - n1 < 0
// since n1 and n2 are unsigned
n2 = n1;
} else {
n2 = cmp::max(n1, params.max_length - n1);
}
if n1 + n2 > params.max_length {
n1 = params.max_length / 2;
n2 = n1 + params.max_length % 2;
}
                // Swap lengths if we swapped previously
if swap {
mem::swap(&mut n1, &mut n2);
}
encoding.truncate(n1, params.stride, params.direction);
other_encoding.truncate(n2, params.stride, params.direction);
} else {
encoding.truncate(total_length - to_remove, params.stride, params.direction);
}
}
TruncationStrategy::OnlyFirst | TruncationStrategy::OnlySecond => {
let target = if params.strategy == TruncationStrategy::OnlyFirst {
Ok(&mut encoding)
} else if let Some(encoding) = pair_encoding.as_mut() {
Ok(encoding)
} else {
Err(Box::new(TruncationError::SecondSequenceNotProvided))
}?;
let target_len = target.get_ids().len();
if target_len > to_remove {
target.truncate(target_len - to_remove, params.stride, params.direction);
} else {
return Err(Box::new(TruncationError::SequenceTooShort));
}
}
}
Ok((encoding, pair_encoding))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::Encoding;
use std::collections::HashMap;
fn get_empty() -> Encoding {
Encoding::new(
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
HashMap::new(),
)
}
fn get_short() -> Encoding {
Encoding::new(
vec![1, 2],
vec![0, 0],
vec![String::from("a"), String::from("b")],
vec![Some(0), Some(1)],
vec![(0, 1), (1, 2)],
vec![0, 0],
vec![1, 1],
vec![],
HashMap::new(),
)
}
fn get_medium() -> Encoding {
Encoding::new(
vec![3, 4, 5, 6],
vec![0, 0, 0, 0],
vec![
String::from("d"),
String::from("e"),
String::from("f"),
String::from("g"),
],
vec![Some(0), Some(1), Some(2), Some(3)],
vec![(0, 1), (1, 2), (2, 3), (3, 4)],
vec![0, 0, 0, 0],
vec![1, 1, 1, 1],
vec![],
HashMap::new(),
)
}
fn get_long() -> Encoding {
Encoding::new(
vec![7, 8, 9, 10, 11, 12, 13, 14],
vec![0, 0, 0, 0, 0, 0, 0, 0],
vec![
String::from("h"),
String::from("i"),
String::from("j"),
String::from("k"),
String::from("l"),
String::from("m"),
String::from("n"),
String::from("o"),
],
vec![
Some(0),
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
Some(7),
],
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(6, 8),
],
vec![0, 0, 0, 0, 0, 0, 0, 0],
vec![1, 1, 1, 1, 1, 1, 1, 1],
vec![],
HashMap::new(),
)
}
fn truncate_and_assert(
encoding1: Encoding,
encoding2: Encoding,
params: &TruncationParams,
n1: usize,
n2: usize,
) {
match truncate_encodings(encoding1, Some(encoding2), params) {
Ok((e1, Some(e2))) => {
assert!(e1.get_ids().len() == n1);
assert!(e2.get_ids().len() == n2);
}
_ => panic!(),
};
}
#[test]
fn truncate_encodings_longest_first() {
let params = TruncationParams {
max_length: 7,
strategy: TruncationStrategy::LongestFirst,
stride: 0,
direction: TruncationDirection::Right,
};
truncate_and_assert(get_empty(), get_empty(), ¶ms, 0, 0);
truncate_and_assert(get_empty(), get_short(), ¶ms, 0, 2);
truncate_and_assert(get_empty(), get_medium(), ¶ms, 0, 4);
truncate_and_assert(get_empty(), get_long(), ¶ms, 0, 7);
truncate_and_assert(get_short(), get_empty(), ¶ms, 2, 0);
truncate_and_assert(get_short(), get_short(), ¶ms, 2, 2);
truncate_and_assert(get_short(), get_medium(), ¶ms, 2, 4);
truncate_and_assert(get_short(), get_long(), ¶ms, 2, 5);
truncate_and_assert(get_medium(), get_empty(), ¶ms, 4, 0);
truncate_and_assert(get_medium(), get_short(), ¶ms, 4, 2);
truncate_and_assert(get_medium(), get_medium(), ¶ms, 3, 4);
truncate_and_assert(get_medium(), get_long(), ¶ms, 3, 4);
truncate_and_assert(get_long(), get_empty(), ¶ms, 7, 0);
truncate_and_assert(get_long(), get_short(), ¶ms, 5, 2);
truncate_and_assert(get_long(), get_medium(), ¶ms, 4, 3);
truncate_and_assert(get_long(), get_long(), ¶ms, 3, 4);
}
#[test]
fn truncate_encodings_empty() {
let params = TruncationParams {
max_length: 0,
strategy: TruncationStrategy::LongestFirst,
stride: 0,
direction: TruncationDirection::Right,
};
truncate_and_assert(get_empty(), get_short(), ¶ms, 0, 0);
truncate_and_assert(get_medium(), get_medium(), ¶ms, 0, 0);
truncate_and_assert(get_long(), get_long(), ¶ms, 0, 0);
}
#[test]
fn test_deserialize_defaults() {
let old_truncation_params = r#"{"max_length":256,"strategy":"LongestFirst","stride":0}"#;
let params: TruncationParams = serde_json::from_str(old_truncation_params).unwrap();
assert_eq!(params.direction, TruncationDirection::Right);
}
}
| tokenizers/tokenizers/src/utils/truncation.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/truncation.rs",
"repo_id": "tokenizers",
"token_count": 5473
} | 228 |
#!/bin/bash
source ~/.bashrc
echo "running docker-entrypoint.sh"
conda activate container
echo $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS
echo "printed TPU info"
export XRT_TPU_CONFIG="tpu_worker;0;${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}"
exec "$@"#!/bin/bash
| transformers/docker/transformers-pytorch-tpu/docker-entrypoint.sh/0 | {
"file_path": "transformers/docker/transformers-pytorch-tpu/docker-entrypoint.sh",
"repo_id": "transformers",
"token_count": 112
} | 229 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Generation with LLMs
[[open-in-colab]]
LLMs (Large Language Models) are the key component behind text generation. In a nutshell, they are large pretrained transformer models trained to predict the next word (or, more precisely, token) from some input text. Since they predict one token at a time, you need to do something more elaborate than simply calling the model to generate new sentences -- you need to perform autoregressive generation.
Autoregressive generation is the inference-time procedure of iteratively calling a model with its own generated outputs, given some initial inputs. In 🤗 Transformers, this is handled by the [`~generation.GenerationMixin.generate`] method, which is available to all models with generative capabilities.
This tutorial will show you how to:
* Generate text with an LLM
* Avoid common pitfalls
* Take the next steps to get the most out of your LLM
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers bitsandbytes>=0.39.0 -q
```
## Generate text
A language model trained for [causal language modeling](tasks/language_modeling) takes a sequence of text tokens as input and returns the probability distribution for the next token.
<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov"
></video>
<figcaption>"Forward pass of an LLM"</figcaption>
</figure>
A critical aspect of autoregressive generation with LLMs is how the next token is selected from this probability distribution. Anything goes in this step as long as you end up with a token for the next iteration. This means it can be as simple as picking the most likely token from the probability distribution or as complex as applying a dozen transformations before sampling from the resulting distribution.
<!-- [GIF 2 -- TEXT GENERATION] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov"
></video>
<figcaption>"Die autoregressive Generierung wählt iterativ das nächste Token aus einer Wahrscheinlichkeitsverteilung aus, um Text zu erzeugen"</figcaption>
</figure>
The process depicted above is repeated iteratively until some stopping condition is reached. Ideally, the stopping condition is dictated by the model, which should learn when to output an end-of-sequence (EOS) token. If this is not the case, generation stops when a predefined maximum length is reached.
Properly setting up the token selection step and the stopping condition is essential to make your model behave as you'd expect on your task. That is why each model comes with a [`~generation.GenerationConfig`] file, which contains a good default generative parameterization and is loaded alongside your model.
Let's talk code!
<Tip>
If you're interested in basic LLM usage, our high-level [`Pipeline`](pipeline_tutorial) interface is a great starting point. However, LLMs often require advanced features like quantization and fine control of the token selection step, which is best done through [`~generation.GenerationMixin.generate`]. Autoregressive generation with LLMs is also resource-intensive and should be executed on a GPU for adequate throughput.
</Tip>
<!-- TODO: update example to llama 2 (or a newer popular baseline) when it becomes ungated -->
First, you need to load the model.
```py
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained(
... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```
You'll notice two flags in the `from_pretrained` call:
- `device_map` ensures the model is moved to your GPU(s)
- `load_in_4bit` applies [4-bit dynamic quantization](main_classes/quantization) to massively reduce the resource requirements
There are other ways to initialize a model, but this is a good baseline to begin with an LLM; one alternative is sketched below.
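For instance, here is a minimal sketch of an equivalent initialization that passes an explicit quantization config instead of the `load_in_4bit` shortcut (assuming `bitsandbytes` is installed; exact keyword support can vary across versions):
```py
>>> from transformers import AutoModelForCausalLM, BitsAndBytesConfig
>>> quantization_config = BitsAndBytesConfig(load_in_4bit=True)
>>> model = AutoModelForCausalLM.from_pretrained(
...     "openlm-research/open_llama_7b", device_map="auto", quantization_config=quantization_config
... )
```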
Next, you need to preprocess your text input with a [tokenizer](tokenizer_summary).
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
```
The `model_inputs` variable holds the tokenized text input, as well as the attention mask. While [`~generation.GenerationMixin.generate`] does its best to infer the attention mask when it is not passed, we recommend passing it whenever possible for optimal results.
Finally, call the [`~generation.GenerationMixin.generate`] method to return the generated tokens, which should be converted to text before printing.
```py
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A list of colors: red, blue, green, yellow, black, white, and brown'
```
And that's it! In a few lines of code, you can harness the power of an LLM.
## Common pitfalls
There are many [generation strategies](generation_strategies), and sometimes the default values may not be appropriate for your use case. If your outputs aren't aligned with what you're expecting, we've created a list of the most common pitfalls and how to avoid them.
```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```
### Generated output is too short/long
If not specified in the [`~generation.GenerationConfig`] file, `generate` returns up to 20 tokens by default. We highly recommend manually setting `max_new_tokens` in your `generate` call to control the maximum number of new tokens it can return. Keep in mind LLMs (more precisely, [decoder-only models](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)) also return the input prompt as part of the output.
```py
>>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda")
>>> # By default, the output will contain up to 20 tokens
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5'
>>> # Setting `max_new_tokens` allows you to control the maximum length
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=50)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,'
```
### Incorrect generation mode
By default, and unless specified in the [`~generation.GenerationConfig`] file, `generate` selects the most likely token at each iteration (greedy decoding). Depending on your task, this may be undesirable; creative tasks like chatbots or writing an essay benefit from sampling. On the other hand, input-grounded tasks like audio transcription or translation benefit from greedy decoding. Enable sampling with `do_sample=True`, and you can learn more about this topic in this [blog post](https://huggingface.co/blog/how-to-generate).
```py
>>> # Set seed for reproducibility -- you don't need this unless you want full reproducibility
>>> from transformers import set_seed
>>> set_seed(0)
>>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda")
>>> # LLM + greedy decoding = repetitive, boring output
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat. I am a cat. I am a cat. I am a cat'
>>> # With sampling, the output becomes more creative!
>>> generated_ids = model.generate(**model_inputs, do_sample=True)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat.\nI just need to be. I am always.\nEvery time'
```
### Wrong padding side
LLMs are [decoder-only](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt) architectures, meaning they keep iterating on your input prompt. If your inputs do not have the same length, they need to be padded. Since LLMs are not trained to continue from pad tokens, your input needs to be left-padded. Also make sure you don't forget to pass the attention mask to generate!
```py
>>> # The tokenizer initialized above has right-padding active by default: the 1st sequence,
>>> # which is shorter, has padding on the right side. Generation fails.
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)[0]
''
>>> # With left-padding, it works as expected!
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b", padding_side="left")
>>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'1, 2, 3, 4, 5, 6,'
```
<!-- TODO: when the prompting guide is ready, mention the importance of setting the right prompt in this section -->
## Further resources
While the autoregressive generation process is relatively straightforward, making the most of your LLM can be a challenging endeavor because there are many moving parts. For your next steps to help you dive deeper into LLM usage and understanding:
<!-- TODO: complete with new guides -->
### Advanced generate usage
1. [Guide](generation_strategies) on how to control different generation methods, how to set up the generation configuration file, and how to stream the output;
2. API reference on [`~generation.GenerationConfig`], [`~generation.GenerationMixin.generate`], and [generate-related classes](internal/generation_utils).
### LLM leaderboards
1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), which focuses on the quality of open-source models;
2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard), which focuses on LLM throughput.
### Latency and throughput
1. [Guide](main_classes/quantization) on dynamic quantization, which shows you how to drastically reduce your memory requirements.
### Related libraries
1. [text-generation-inference](https://github.com/huggingface/text-generation-inference), a production-ready server for LLMs;
2. [`optimum`](https://github.com/huggingface/optimum), an extension of 🤗 Transformers that optimizes for specific hardware devices.
| transformers/docs/source/de/llm_tutorial.md/0 | {
"file_path": "transformers/docs/source/de/llm_tutorial.md",
"repo_id": "transformers",
"token_count": 4767
} | 230 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# How to create a custom pipeline?
In this guide, we will see how to create a custom pipeline and share it on the [Hub](https://hf.co/models) or add it to the
🤗 Transformers library.
First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes,
dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible
as it makes compatibility easier (even through other languages via JSON). Those will be the `inputs` of the
pipeline (`preprocess`).
Then define the `outputs`. Same policy as the `inputs`. The simpler, the better. Those will be the outputs of
`postprocess` method.
Start by inheriting the base class `Pipeline` with the 4 methods needed to implement `preprocess`,
`_forward`, `postprocess`, and `_sanitize_parameters`.
```python
import torch
from transformers import Pipeline
class MyPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
return preprocess_kwargs, {}, {}
def preprocess(self, inputs, maybe_arg=2):
        model_input = torch.tensor(inputs["input_ids"])
return {"model_input": model_input}
def _forward(self, model_inputs):
# model_inputs == {"model_input": model_input}
outputs = self.model(**model_inputs)
# Maybe {"logits": Tensor(...)}
return outputs
def postprocess(self, model_outputs):
best_class = model_outputs["logits"].softmax(-1)
return best_class
```
The structure of this breakdown is to support relatively seamless CPU/GPU handling, while allowing
pre/postprocessing to be done on the CPU, on different threads.
`preprocess` will take the originally defined inputs, and turn them into something feedable to the model. It might
contain more information and is usually a `Dict`.
`_forward` is the implementation detail and is not meant to be called directly. `forward` is the preferred
method to call as it contains safeguards to make sure everything is working on the expected device. Anything
linked to a real model belongs in the `_forward` method; everything else goes in preprocess/postprocess.
The `postprocess` method takes the output of `_forward` and turns it into the final output that was decided
earlier.
`_sanitize_parameters` exists to allow users to pass any parameters whenever they wish, be it at initialization
time `pipeline(...., maybe_arg=4)` or at call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.
`_sanitize_parameters` returns 3 dicts of kwargs that will be passed directly to `preprocess`,
`_forward`, and `postprocess`. Don't fill anything if the caller didn't call with any extra parameter. That
allows keeping the default arguments in the function definition, which is always more "natural".
A classic example would be a `top_k` argument in the post processing in classification tasks.
```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]
>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```
In order to achieve that, we'll update our `postprocess` method with a default parameter of `5`, and edit
`_sanitize_parameters` to allow this new parameter.
```python
def postprocess(self, model_outputs, top_k=5):
best_class = model_outputs["logits"].softmax(-1)
# Add logic to handle top_k
return best_class
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
postprocess_kwargs = {}
if "top_k" in kwargs:
postprocess_kwargs["top_k"] = kwargs["top_k"]
return preprocess_kwargs, {}, postprocess_kwargs
```
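For instance, a minimal sketch of what the `top_k` handling could look like, assuming `model_outputs["logits"]` holds a PyTorch tensor of class scores for a single input (the return format below is illustrative, not mandated by the `Pipeline` API):
```python
def postprocess(self, model_outputs, top_k=5):
    probabilities = model_outputs["logits"][0].softmax(-1)
    # Keep only the `top_k` highest-scoring classes
    scores, class_ids = probabilities.topk(top_k)
    return [
        {"label": self.model.config.id2label[class_id.item()], "score": score.item()}
        for score, class_id in zip(scores, class_ids)
    ]
```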
Try to keep the inputs/outputs very simple and ideally JSON-serializable as it makes the pipeline usage very easy
without requiring users to understand new kinds of objects. It's also relatively common to support many different types
of arguments for ease of use (audio files, which can be filenames, URLs or pure bytes).
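A minimal sketch of a helper that `preprocess` could call to normalize such inputs (the helper name and branching logic are illustrative assumptions, not part of the `Pipeline` API):
```python
import requests
def to_raw_bytes(inputs):
    """Normalize a filename, URL, or raw bytes into raw bytes (illustrative helper)."""
    if isinstance(inputs, bytes):
        return inputs
    if inputs.startswith(("http://", "https://")):
        return requests.get(inputs).content  # download raw bytes from a URL
    with open(inputs, "rb") as f:  # treat other strings as a local file path
        return f.read()
```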
## Adding it to the list of supported tasks
To register your `new-task` to the list of supported tasks, you have to add it to the `PIPELINE_REGISTRY`:
```python
from transformers.pipelines import PIPELINE_REGISTRY
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
)
```
You can specify a default model if you want, in which case it should come with a specific revision (which can be the name of a branch or a commit hash, here we took `"abcdef"`) as well as the type:
```python
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
default={"pt": ("user/awesome_model", "abcdef")},
type="text", # current support type: text, audio, image, multimodal
)
```
## Share your pipeline on the Hub
To share your custom pipeline on the Hub, you just have to save the custom code of your `Pipeline` subclass in a
python file. For instance, let's say we want to use a custom pipeline for sentence pair classification like this:
```py
import numpy as np
from transformers import Pipeline
def softmax(outputs):
maxes = np.max(outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, second_text=None):
return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits)
best_class = np.argmax(probabilities)
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
```
The implementation is framework agnostic, and will work for PyTorch and TensorFlow models. If we have saved this in
a file named `pair_classification.py`, we can then import it and register it like this:
```py
from pair_classification import PairClassificationPipeline
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification,
tf_model=TFAutoModelForSequenceClassification,
)
```
Once this is done, we can use it with a pretrained model. For instance `sgugger/finetuned-bert-mrpc` has been
fine-tuned on the MRPC dataset, which classifies pairs of sentences as paraphrases or not.
```py
from transformers import pipeline
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
```
Then we can share it on the Hub by using the `save_pretrained` method in a `Repository`:
```py
from huggingface_hub import Repository
repo = Repository("test-dynamic-pipeline", clone_from="{your_username}/test-dynamic-pipeline")
classifier.save_pretrained("test-dynamic-pipeline")
repo.push_to_hub()
```
This will copy the file where you defined `PairClassificationPipeline` inside the folder `"test-dynamic-pipeline"`,
along with saving the model and tokenizer of the pipeline, before pushing everything into the repository
`{your_username}/test-dynamic-pipeline`. After that, anyone can use it as long as they provide the option
`trust_remote_code=True`:
```py
from transformers import pipeline
classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True)
```
## Add the pipeline to 🤗 Transformers
If you want to contribute your pipeline to 🤗 Transformers, you will need to add a new module in the `pipelines` submodule
with the code of your pipeline, then add it to the list of tasks defined in `pipelines/__init__.py`.
Then you will need to add tests. Create a new file `tests/test_pipelines_MY_PIPELINE.py` with examples of the other tests.
The `run_pipeline_test` function will be very generic and run on small random models on every possible
architecture as defined by `model_mapping` and `tf_model_mapping`.
This is very important to test future compatibility, meaning if someone adds a new model for
`XXXForQuestionAnswering` then the pipeline test will attempt to run on it. Because the models are random it's
impossible to check for actual values, which is why there is a helper `ANY` that will simply attempt to match the
type of the pipeline output.
You also *need* to implement 2 (ideally 4) tests.
- `test_small_model_pt` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
and test the pipeline outputs. The results should be the same as `test_small_model_tf`.
- `test_small_model_tf` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
and test the pipeline outputs. The results should be the same as `test_small_model_pt`.
- `test_large_model_pt` (`optional`): Tests the pipeline on a real pipeline where the results are supposed to
make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
sure there is no drift in future releases.
- `test_large_model_tf` (`optional`): Tests the pipeline on a real pipeline where the results are supposed to
make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
sure there is no drift in future releases.
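A minimal sketch of what `test_small_model_pt` could look like for the pair-classification pipeline above, assuming the `PIPELINE_REGISTRY` registration shown earlier has already run; the tiny checkpoint name is only a stand-in:
```py
import unittest
from transformers import pipeline
class PairClassificationPipelineTests(unittest.TestCase):
    def test_small_model_pt(self):
        # "hf-internal-testing/tiny-random-bert" stands in for any tiny random checkpoint
        classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert")
        outputs = classifier("This is a test", second_text="Another sentence")
        # The label/score of a random tiny model are meaningless; only check the output structure
        self.assertEqual(sorted(outputs.keys()), ["label", "logits", "score"])
```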
| transformers/docs/source/en/add_new_pipeline.md/0 | {
"file_path": "transformers/docs/source/en/add_new_pipeline.md",
"repo_id": "transformers",
"token_count": 3395
} | 231 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Fully Sharded Data Parallel
[Fully Sharded Data Parallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) is a data parallel method that shards a model's parameters, gradients and optimizer states across the number of available GPUs (also called workers or *rank*). Unlike [DistributedDataParallel (DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html), which keeps a full copy of the model on each GPU, FSDP reduces memory usage because the model is sharded rather than replicated. This improves GPU memory-efficiency and allows you to train much larger models on fewer GPUs. FSDP is integrated with Accelerate, a library for easily managing training in distributed environments, which means it is available for use from the [`Trainer`] class.
Before you start, make sure Accelerate is installed and that you have PyTorch 2.1.0 or newer.
```bash
pip install accelerate
```
## FSDP configuration
To start, run the [`accelerate config`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-config) command to create a configuration file for your training environment. Accelerate uses this configuration file to automatically set up the correct training environment based on your selected training options in `accelerate config`.
```bash
accelerate config
```
When you run `accelerate config`, you'll be prompted with a series of options to configure your training environment. This section covers some of the most important FSDP options. To learn more about the other available FSDP options, take a look at the [fsdp_config](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.fsdp_config) parameters.
### Sharding strategy
FSDP offers a number of sharding strategies to select from:
* `FULL_SHARD` - shards model parameters, gradients and optimizer states across workers; select `1` for this option
* `SHARD_GRAD_OP`- shard gradients and optimizer states across workers; select `2` for this option
* `NO_SHARD` - don't shard anything (this is equivalent to DDP); select `3` for this option
* `HYBRID_SHARD` - shard model parameters, gradients and optimizer states within each worker where each worker also has a full copy; select `4` for this option
* `HYBRID_SHARD_ZERO2` - shard gradients and optimizer states within each worker where each worker also has a full copy; select `5` for this option
This is enabled by the `fsdp_sharding_strategy` flag.
### CPU offload
You can also offload parameters and gradients to the CPU when they are not in use to save even more GPU memory and help you fit large models where even FSDP may not be sufficient. This is enabled by setting `fsdp_offload_params: true` when running `accelerate config`.
### Wrapping policy
FSDP is applied by wrapping each layer in the network. The wrapping is usually applied in a nested way where the full weights are discarded after each forward pass to save memory for use in the next layer. The *auto wrapping* policy is the simplest way to implement this and you don't need to change any code. You should select `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP` to wrap a Transformer layer and `fsdp_transformer_layer_cls_to_wrap` to specify which layer to wrap (for example `BertLayer`).
Otherwise, you can choose a size-based wrapping policy where FSDP is applied to a layer if it exceeds a certain number of parameters. This is enabled by setting `fsdp_wrap_policy: SIZE_BASED_WRAP` and `min_num_param` to the desired size threshold.
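If you launch training through [`Trainer`] instead of editing the config file by hand, the same choices can be expressed in Python. A minimal sketch (the exact `fsdp_config` key names can vary between `transformers` versions, so treat them as illustrative):
```py
from transformers import TrainingArguments
# Roughly equivalent to TRANSFORMER_BASED_WRAP around BertLayer with full sharding
training_args = TrainingArguments(
    output_dir="out",
    fsdp="full_shard auto_wrap",
    fsdp_config={"transformer_layer_cls_to_wrap": ["BertLayer"]},
)
```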
### Checkpointing
Intermediate checkpoints should be saved with `fsdp_state_dict_type: SHARDED_STATE_DICT` because saving the full state dict with CPU offloading on rank 0 takes a lot of time and often results in `NCCL Timeout` errors due to indefinite hanging during broadcasting. You can resume training from the sharded state dicts with the [`~accelerate.Accelerator.load_state`] method.
```py
# directory containing checkpoints
accelerator.load_state("ckpt")
```
However, when training ends, you want to save the full state dict because the sharded state dict is only compatible with FSDP.
```py
if trainer.is_fsdp_enabled:
trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")
trainer.save_model(script_args.output_dir)
```
### TPU
[PyTorch XLA](https://pytorch.org/xla/release/2.1/index.html) supports FSDP training for TPUs and it can be enabled by modifying the FSDP configuration file generated by `accelerate config`. In addition to the sharding strategies and wrapping options specified above, you can add the parameters shown below to the file.
```yaml
xla: True # must be set to True to enable PyTorch/XLA
xla_fsdp_settings: # XLA-specific FSDP parameters
xla_fsdp_grad_ckpt: True # use gradient checkpointing
```
The [`xla_fsdp_settings`](https://github.com/pytorch/xla/blob/2e6e183e0724818f137c8135b34ef273dea33318/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py#L128) allow you to configure additional XLA-specific parameters for FSDP.
## Launch training
An example FSDP configuration file may look like:
```yaml
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_backward_prefetch_policy: BACKWARD_PRE
fsdp_cpu_ram_efficient_loading: true
fsdp_forward_prefetch: false
fsdp_offload_params: true
fsdp_sharding_strategy: 1
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sync_module_states: true
fsdp_transformer_layer_cls_to_wrap: BertLayer
fsdp_use_orig_params: true
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
To launch training, run the [`accelerate launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) command and it'll automatically use the configuration file you previously created with `accelerate config`.
```bash
accelerate launch my-trainer-script.py
```
You can also pass some of the FSDP parameters directly on the command line:
```bash
accelerate launch --fsdp="full shard" --fsdp_config="path/to/fsdp_config/" my-trainer-script.py
```
## Next steps
FSDP can be a powerful tool for training really large models when you have access to more than one GPU or TPU. By sharding the model parameters, optimizer and gradient states, and even offloading them to the CPU when they're inactive, FSDP can reduce the high cost of large-scale training. If you're interested in learning more, the following may be helpful:
* Follow along with the more in-depth Accelerate guide for [FSDP](https://huggingface.co/docs/accelerate/usage_guides/fsdp).
* Read the [Introducing PyTorch Fully Sharded Data Parallel (FSDP) API](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) blog post.
* Read the [Scaling PyTorch models on Cloud TPUs with FSDP](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) blog post.
| transformers/docs/source/en/fsdp.md/0 | {
"file_path": "transformers/docs/source/en/fsdp.md",
"repo_id": "transformers",
"token_count": 2239
} | 232 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Generation with LLMs
[[open-in-colab]]
LLMs, or Large Language Models, are the key component behind text generation. In a nutshell, they consist of large pretrained transformer models trained to predict the next word (or, more precisely, token) given some input text. Since they predict one token at a time, you need to do something more elaborate to generate new sentences other than just calling the model -- you need to do autoregressive generation.
Autoregressive generation is the inference-time procedure of iteratively calling a model with its own generated outputs, given a few initial inputs. In 🤗 Transformers, this is handled by the [`~generation.GenerationMixin.generate`] method, which is available to all models with generative capabilities.
This tutorial will show you how to:
* Generate text with an LLM
* Avoid common pitfalls
* Next steps to help you get the most out of your LLM
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers bitsandbytes>=0.39.0 -q
```
## Generate text
A language model trained for [causal language modeling](tasks/language_modeling) takes a sequence of text tokens as input and returns the probability distribution for the next token.
<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov"
></video>
<figcaption>"Forward pass of an LLM"</figcaption>
</figure>
A critical aspect of autoregressive generation with LLMs is how to select the next token from this probability distribution. Anything goes in this step as long as you end up with a token for the next iteration. This means it can be as simple as selecting the most likely token from the probability distribution or as complex as applying a dozen transformations before sampling from the resulting distribution.
<!-- [GIF 2 -- TEXT GENERATION] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov"
></video>
<figcaption>"Autoregressive generation iteratively selects the next token from a probability distribution to generate text"</figcaption>
</figure>
The process depicted above is repeated iteratively until some stopping condition is reached. Ideally, the stopping condition is dictated by the model, which should learn when to output an end-of-sequence (`EOS`) token. If this is not the case, generation stops when some predefined maximum length is reached.
Properly setting up the token selection step and the stopping condition is essential to make your model behave as you'd expect on your task. That is why we have a [`~generation.GenerationConfig`] file associated with each model, which contains a good default generative parameterization and is loaded alongside your model.
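If you're curious what those defaults look like, you can load the generation configuration on its own; a quick sketch (assuming the checkpoint ships a `generation_config.json`; otherwise `model.generation_config` gives you the same object after loading):
```py
>>> from transformers import GenerationConfig
>>> generation_config = GenerationConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
>>> generation_config.max_length  # library-wide default of 20 unless the checkpoint overrides it
```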
Let's talk code!
<Tip>
If you're interested in basic LLM usage, our high-level [`Pipeline`](pipeline_tutorial) interface is a great starting point. However, LLMs often require advanced features like quantization and fine control of the token selection step, which is best done through [`~generation.GenerationMixin.generate`]. Autoregressive generation with LLMs is also resource-intensive and should be executed on a GPU for adequate throughput.
</Tip>
First, you need to load the model.
```py
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained(
... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True
... )
```
You'll notice two flags in the `from_pretrained` call:
- `device_map` ensures the model is moved to your GPU(s)
- `load_in_4bit` applies [4-bit dynamic quantization](main_classes/quantization) to massively reduce the resource requirements
There are other ways to initialize a model, but this is a good baseline to begin with an LLM.
Next, you need to preprocess your text input with a [tokenizer](tokenizer_summary).
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", padding_side="left")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
```
The `model_inputs` variable holds the tokenized text input, as well as the attention mask. While [`~generation.GenerationMixin.generate`] does its best to infer the attention mask when it is not passed, we recommend passing it whenever possible for optimal results.
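For example, instead of unpacking the whole dict as done below, you could pass the mask explicitly; a sketch of the equivalent call:
```py
>>> generated_ids = model.generate(
...     input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"]
... )
```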
After tokenizing the inputs, you can call the [`~generation.GenerationMixin.generate`] method to return the generated tokens. The generated tokens should then be converted to text before printing.
```py
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A list of colors: red, blue, green, yellow, orange, purple, pink,'
```
Finally, you don't need to do it one sequence at a time! You can batch your inputs, which will greatly improve the throughput at a small latency and memory cost. All you need to do is to make sure you pad your inputs properly (more on that below).
```py
>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default
>>> model_inputs = tokenizer(
... ["A list of colors: red, blue", "Portugal is"], return_tensors="pt", padding=True
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
['A list of colors: red, blue, green, yellow, orange, purple, pink,',
'Portugal is a country in southwestern Europe, on the Iber']
```
And that's it! In a few lines of code, you can harness the power of an LLM.
## Common pitfalls
There are many [generation strategies](generation_strategies), and sometimes the default values may not be appropriate for your use case. If your outputs aren't aligned with what you're expecting, we've created a list of the most common pitfalls and how to avoid them.
```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True
... )
```
### Generated output is too short/long
If not specified in the [`~generation.GenerationConfig`] file, `generate` returns up to 20 tokens by default. We highly recommend manually setting `max_new_tokens` in your `generate` call to control the maximum number of new tokens it can return. Keep in mind LLMs (more precisely, [decoder-only models](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)) also return the input prompt as part of the output.
```py
>>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda")
>>> # By default, the output will contain up to 20 tokens
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5'
>>> # Setting `max_new_tokens` allows you to control the maximum length
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=50)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,'
```
### Incorrect generation mode
By default, and unless specified in the [`~generation.GenerationConfig`] file, `generate` selects the most likely token at each iteration (greedy decoding). Depending on your task, this may be undesirable; creative tasks like chatbots or writing an essay benefit from sampling. On the other hand, input-grounded tasks like audio transcription or translation benefit from greedy decoding. Enable sampling with `do_sample=True`, and you can learn more about this topic in this [blog post](https://huggingface.co/blog/how-to-generate).
```py
>>> # Set seed for reproducibility -- you don't need this unless you want full reproducibility
>>> from transformers import set_seed
>>> set_seed(42)
>>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda")
>>> # LLM + greedy decoding = repetitive, boring output
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat. I am a cat. I am a cat. I am a cat'
>>> # With sampling, the output becomes more creative!
>>> generated_ids = model.generate(**model_inputs, do_sample=True)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat. Specifically, I am an indoor-only cat. I'
```
### Wrong padding side
LLMs are [decoder-only](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt) architectures, meaning they continue to iterate on your input prompt. If your inputs do not have the same length, they need to be padded. Since LLMs are not trained to continue from pad tokens, your input needs to be left-padded. Make sure you also don't forget to pass the attention mask to generate!
```py
>>> # The tokenizer initialized above has right-padding active by default: the 1st sequence,
>>> # which is shorter, has padding on the right side. Generation fails to capture the logic.
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'1, 2, 33333333333'
>>> # With left-padding, it works as expected!
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", padding_side="left")
>>> tokenizer.pad_token = tokenizer.eos_token # Most LLMs don't have a pad token by default
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'1, 2, 3, 4, 5, 6,'
```
### Wrong prompt
Some models and tasks expect a certain input prompt format to work properly. When this format is not applied, you will get a silent performance degradation: the model kinda works, but not as well as if you were following the expected prompt. More information about prompting, including which models and tasks require a specific prompt format, is available in this [guide](tasks/prompting). Let's see an example with a chat LLM, which makes use of [chat templating](chat_templating):
```python
>>> tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
>>> model = AutoModelForCausalLM.from_pretrained(
... "HuggingFaceH4/zephyr-7b-alpha", device_map="auto", load_in_4bit=True
... )
>>> set_seed(0)
>>> prompt = """How many helicopters can a human eat in one sitting? Reply as a thug."""
>>> model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
>>> input_length = model_inputs.input_ids.shape[1]
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=20)
>>> print(tokenizer.batch_decode(generated_ids[:, input_length:], skip_special_tokens=True)[0])
"I'm not a thug, but i can tell you that a human cannot eat"
>>> # Oh no, it did not follow our instruction to reply as a thug! Let's see what happens when we write
>>> # a better prompt and use the right template for this model (through `tokenizer.apply_chat_template`)
>>> set_seed(0)
>>> messages = [
... {
... "role": "system",
... "content": "You are a friendly chatbot who always responds in the style of a thug",
... },
... {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
... ]
>>> model_inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to("cuda")
>>> input_length = model_inputs.shape[1]
>>> generated_ids = model.generate(model_inputs, do_sample=True, max_new_tokens=20)
>>> print(tokenizer.batch_decode(generated_ids[:, input_length:], skip_special_tokens=True)[0])
'None, you thug. How bout you try to focus on more useful questions?'
>>> # As we can see, it followed a proper thug style 😎
```
## Further resources
While the autoregressive generation process is relatively straightforward, making the most out of your LLM can be a challenging endeavor because there are many moving parts. Here are some next steps to help you dive deeper into LLM usage and understanding:
### Advanced generate usage
1. [Guide](generation_strategies) on how to control different generation methods, how to set up the generation configuration file, and how to stream the output;
2. [Guide](chat_templating) on the prompt template for chat LLMs;
3. [Guide](tasks/prompting) on how to get the most out of prompt design;
4. API reference on [`~generation.GenerationConfig`], [`~generation.GenerationMixin.generate`], and [generate-related classes](internal/generation_utils). Most of the classes, including the logits processors, have usage examples!
### LLM leaderboards
1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), which focuses on the quality of the open-source models;
2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard), which focuses on LLM throughput.
### Latency, throughput and memory utilization
1. [Guide](llm_tutorial_optimization) on how to optimize LLMs for speed and memory;
2. [Guide](main_classes/quantization) on quantization such as bitsandbytes and autogptq, which shows you how to drastically reduce your memory requirements.
### Related libraries
1. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference), a production-ready server for LLMs;
2. [`optimum`](https://github.com/huggingface/optimum), an extension of 🤗 Transformers that optimizes for specific hardware devices.
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Pipelines
The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of
the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity
Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. See the
[task summary](../task_summary) for examples of use.
There are two categories of pipeline abstractions to be aware of:
- The [`pipeline`] which is the most powerful object encapsulating all other pipelines.
- Task-specific pipelines are available for [audio](#audio), [computer vision](#computer-vision), [natural language processing](#natural-language-processing), and [multimodal](#multimodal) tasks.
## The pipeline abstraction
The *pipeline* abstraction is a wrapper around all the other available pipelines. It is instantiated like any other
pipeline but can provide additional quality-of-life features.
Simple call on one item:
```python
>>> from transformers import pipeline
>>> pipe = pipeline("text-classification")
>>> pipe("This restaurant is awesome")
[{'label': 'POSITIVE', 'score': 0.9998743534088135}]
```
If you want to use a specific model from the [hub](https://huggingface.co) you can ignore the task if the model on
the hub already defines it:
```python
>>> pipe = pipeline(model="FacebookAI/roberta-large-mnli")
>>> pipe("This restaurant is awesome")
[{'label': 'NEUTRAL', 'score': 0.7313136458396912}]
```
To call a pipeline on many items, you can call it with a *list*.
```python
>>> pipe = pipeline("text-classification")
>>> pipe(["This restaurant is awesome", "This restaurant is awful"])
[{'label': 'POSITIVE', 'score': 0.9998743534088135},
{'label': 'NEGATIVE', 'score': 0.9996669292449951}]
```
To iterate over full datasets it is recommended to use a `dataset` directly. This means you don't need to allocate
the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on
GPU. If it doesn't, don't hesitate to create an issue.
```python
import datasets
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset
from tqdm.auto import tqdm
pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0)
dataset = datasets.load_dataset("superb", name="asr", split="test")
# KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item
# as we're not interested in the *target* part of the dataset. For sentence pair use KeyPairDataset
for out in tqdm(pipe(KeyDataset(dataset, "file"))):
print(out)
# {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"}
# {"text": ....}
# ....
```
For ease of use, a generator is also possible:
```python
from transformers import pipeline
pipe = pipeline("text-classification")
def data():
while True:
# This could come from a dataset, a database, a queue or HTTP request
# in a server
        # Caveat: because this is iterative, you cannot use `num_workers > 1` to
        # use multiple threads to preprocess the data. You can still have 1 thread that
        # does the preprocessing while the main thread runs the big inference
yield "This is a test"
for out in pipe(data()):
print(out)
# {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"}
# {"text": ....}
# ....
```
[[autodoc]] pipeline
## Pipeline batching
All pipelines can use batching. This will work
whenever the pipeline uses its streaming ability (so when passing lists or `Dataset` or `generator`).
```python
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset
import datasets
dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised")
pipe = pipeline("text-classification", device=0)
for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"):
print(out)
# [{'label': 'POSITIVE', 'score': 0.9998743534088135}]
    # Exactly the same output as before, but the content is passed
    # as batches to the model
```
<Tip warning={true}>
However, this is not automatically a win for performance. It can be either a 10x speedup or 5x slowdown depending
on hardware, data and the actual model being used.
Example where it's mostly a speedup:
</Tip>
```python
from transformers import pipeline
from torch.utils.data import Dataset
from tqdm.auto import tqdm
pipe = pipeline("text-classification", device=0)
class MyDataset(Dataset):
def __len__(self):
return 5000
def __getitem__(self, i):
return "This is a test"
dataset = MyDataset()
for batch_size in [1, 8, 64, 256]:
print("-" * 30)
print(f"Streaming batch_size={batch_size}")
for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)):
pass
```
```
# On GTX 970
------------------------------
Streaming no batching
100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s]
------------------------------
Streaming batch_size=8
100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s]
------------------------------
Streaming batch_size=64
100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s]
------------------------------
Streaming batch_size=256
100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s]
(diminishing returns, saturated the GPU)
```
Example where it's mostly a slowdown:
```python
class MyDataset(Dataset):
def __len__(self):
return 5000
def __getitem__(self, i):
if i % 64 == 0:
n = 100
else:
n = 1
return "This is a test" * n
```
Here there is an occasional very long sentence compared to the others. In that case, the **whole** batch will need to be 400
tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to a large slowdown. Even worse, on
bigger batches, the program simply crashes.
```
------------------------------
Streaming no batching
100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s]
------------------------------
Streaming batch_size=8
100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s]
------------------------------
Streaming batch_size=64
100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s]
------------------------------
Streaming batch_size=256
0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/home/nicolas/src/transformers/test.py", line 42, in <module>
for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)):
....
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
RuntimeError: CUDA out of memory. Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch)
```
There are no good (general) solutions for this problem, and your mileage may vary depending on your use case.
For users, a rule of thumb is:
- **Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the
only way to go.**
- If you are latency constrained (live product doing inference), don't batch.
- If you are using CPU, don't batch.
- If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:
- If you have no clue about the size of the sequence_length ("natural" data), by default don't batch, measure and
try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don't
control the sequence_length.)
- If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push
it until you get OOMs.
  - The larger the GPU, the more likely batching is to be interesting.
- As soon as you enable batching, make sure you can handle OOMs nicely (a minimal sketch of one way to do this is shown below).
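As an illustration of the last point, here is a hypothetical sketch (not an API of `transformers`, just one way to recover from OOM) that halves the batch size and retries whenever CUDA runs out of memory:
```python
import torch
from transformers import pipeline

pipe = pipeline("text-classification", device=0)

def run_with_oom_fallback(data, batch_size=64):
    # Hypothetical helper: halve the batch size on CUDA OOM and retry
    while batch_size >= 1:
        try:
            return list(pipe(data, batch_size=batch_size))
        except RuntimeError as e:
            if "out of memory" not in str(e):
                raise
            torch.cuda.empty_cache()  # release cached memory before retrying
            batch_size //= 2
    raise RuntimeError("Even batch_size=1 does not fit on this device")

outputs = run_with_oom_fallback(["This is a test"] * 5000)
```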
## Pipeline chunk batching
`zero-shot-classification` and `question-answering` are slightly specific in the sense that a single input might yield
multiple forward passes of a model. Under normal circumstances, this would cause issues with the `batch_size` argument.
In order to circumvent this issue, both of these pipelines are a bit specific: they are `ChunkPipeline` instead of
regular `Pipeline`. In short:
```python
preprocessed = pipe.preprocess(inputs)
model_outputs = pipe.forward(preprocessed)
outputs = pipe.postprocess(model_outputs)
```
Now becomes:
```python
all_model_outputs = []
for preprocessed in pipe.preprocess(inputs):
model_outputs = pipe.forward(preprocessed)
all_model_outputs.append(model_outputs)
outputs = pipe.postprocess(all_model_outputs)
```
This should be very transparent to your code because the pipelines are used in
the same way.
This is a simplified view, since the pipeline can handle the batching automatically! This means you don't have to care
about how many forward passes your inputs are actually going to trigger, so you can optimize the `batch_size`
independently of the inputs. The caveats from the previous section still apply.
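For instance, a minimal sketch with `zero-shot-classification` (the checkpoint and labels are only illustrative): each sequence triggers one forward pass per candidate label, yet `batch_size` can be tuned independently of that.
```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli", device=0)
sequences = ["I love this movie", "The package never arrived"] * 8
candidate_labels = ["positive", "negative", "shipping", "quality"]
# the forward passes produced by the chunking are grouped into batches of 8 under the hood
for out in classifier(sequences, candidate_labels=candidate_labels, batch_size=8):
    print(out["labels"][0], out["scores"][0])
```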
## Pipeline custom code
If you want to override a specific pipeline, don't hesitate to create an issue for your task at hand; the goal of the
pipelines is to be easy to use and support most cases, so `transformers` could maybe support your use case directly.
If you want to try it out simply, you can:
- Subclass your pipeline of choice
```python
class MyPipeline(TextClassificationPipeline):
    def postprocess(self, model_outputs, **kwargs):
        # Your code goes here
        scores = super().postprocess(model_outputs, **kwargs)
        # And here
        return scores

my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...)
# or if you use *pipeline* function, then:
my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline)
```
That should enable you to do all the custom code you want.
## Implementing a pipeline
[Implementing a new pipeline](../add_new_pipeline)
## Audio
Pipelines available for audio tasks include the following.
### AudioClassificationPipeline
[[autodoc]] AudioClassificationPipeline
- __call__
- all
### AutomaticSpeechRecognitionPipeline
[[autodoc]] AutomaticSpeechRecognitionPipeline
- __call__
- all
### TextToAudioPipeline
[[autodoc]] TextToAudioPipeline
- __call__
- all
### ZeroShotAudioClassificationPipeline
[[autodoc]] ZeroShotAudioClassificationPipeline
- __call__
- all
## Computer vision
Pipelines available for computer vision tasks include the following.
### DepthEstimationPipeline
[[autodoc]] DepthEstimationPipeline
- __call__
- all
### ImageClassificationPipeline
[[autodoc]] ImageClassificationPipeline
- __call__
- all
### ImageSegmentationPipeline
[[autodoc]] ImageSegmentationPipeline
- __call__
- all
### ImageToImagePipeline
[[autodoc]] ImageToImagePipeline
- __call__
- all
### ObjectDetectionPipeline
[[autodoc]] ObjectDetectionPipeline
- __call__
- all
### VideoClassificationPipeline
[[autodoc]] VideoClassificationPipeline
- __call__
- all
### ZeroShotImageClassificationPipeline
[[autodoc]] ZeroShotImageClassificationPipeline
- __call__
- all
### ZeroShotObjectDetectionPipeline
[[autodoc]] ZeroShotObjectDetectionPipeline
- __call__
- all
## Natural Language Processing
Pipelines available for natural language processing tasks include the following.
### ConversationalPipeline
[[autodoc]] Conversation
[[autodoc]] ConversationalPipeline
- __call__
- all
### FillMaskPipeline
[[autodoc]] FillMaskPipeline
- __call__
- all
### QuestionAnsweringPipeline
[[autodoc]] QuestionAnsweringPipeline
- __call__
- all
### SummarizationPipeline
[[autodoc]] SummarizationPipeline
- __call__
- all
### TableQuestionAnsweringPipeline
[[autodoc]] TableQuestionAnsweringPipeline
- __call__
### TextClassificationPipeline
[[autodoc]] TextClassificationPipeline
- __call__
- all
### TextGenerationPipeline
[[autodoc]] TextGenerationPipeline
- __call__
- all
### Text2TextGenerationPipeline
[[autodoc]] Text2TextGenerationPipeline
- __call__
- all
### TokenClassificationPipeline
[[autodoc]] TokenClassificationPipeline
- __call__
- all
### TranslationPipeline
[[autodoc]] TranslationPipeline
- __call__
- all
### ZeroShotClassificationPipeline
[[autodoc]] ZeroShotClassificationPipeline
- __call__
- all
## Multimodal
Pipelines available for multimodal tasks include the following.
### DocumentQuestionAnsweringPipeline
[[autodoc]] DocumentQuestionAnsweringPipeline
- __call__
- all
### FeatureExtractionPipeline
[[autodoc]] FeatureExtractionPipeline
- __call__
- all
### ImageFeatureExtractionPipeline
[[autodoc]] ImageFeatureExtractionPipeline
- __call__
- all
### ImageToTextPipeline
[[autodoc]] ImageToTextPipeline
- __call__
- all
### MaskGenerationPipeline
[[autodoc]] MaskGenerationPipeline
- __call__
- all
### VisualQuestionAnsweringPipeline
[[autodoc]] VisualQuestionAnsweringPipeline
- __call__
- all
## Parent class: `Pipeline`
[[autodoc]] Pipeline
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BEiT
## Overview
The BEiT model was proposed in [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by
Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of
Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class
of an image (as done in the [original ViT paper](https://arxiv.org/abs/2010.11929)), BEiT models are pre-trained to
predict visual tokens from the codebook of OpenAI's [DALL-E model](https://arxiv.org/abs/2102.12092) given masked
patches.
The abstract from the paper is the following:
*We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation
from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image
modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image
patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into
visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training
objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we
directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder.
Experimental results on image classification and semantic segmentation show that our model achieves competitive results
with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K,
significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains
86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).*
This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was
contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit).
## Usage tips
- BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They
  outperform both the [original model (ViT)](vit) and [Data-efficient Image Transformers (DeiT)](deit) when fine-tuned on ImageNet-1K and CIFAR-100. You can check out demo notebooks regarding inference as well as
fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace
[`ViTFeatureExtractor`] by [`BeitImageProcessor`] and
[`ViTForImageClassification`] by [`BeitForImageClassification`]).
- There's also a demo notebook available which showcases how to combine DALL-E's image tokenizer with BEiT for
performing masked image modeling. You can find it [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT).
- As the BEiT models expect each image to be of the same size (resolution), one can use
  [`BeitImageProcessor`] to resize (or rescale) and normalize images for the model (see the short example after these tips).
- Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of
each checkpoint. For example, `microsoft/beit-base-patch16-224` refers to a base-sized architecture with patch
resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=microsoft/beit).
- The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of
14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million
images and 1,000 classes).
- BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the
relative position bias among the several self-attention layers. During fine-tuning, each layer's relative position
bias is initialized with the shared relative position bias obtained after pre-training. Note that, if one wants to
  pre-train a model from scratch, one needs to set the `use_relative_position_bias` attribute of [`BeitConfig`] to
  `True` in order to add relative position embeddings.
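A minimal image-classification sketch illustrating the image-processing tip above (it assumes `image` is a PIL image you have already loaded; the checkpoint name is one of those mentioned above):
```python
import torch
from transformers import BeitImageProcessor, BeitForImageClassification

image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
# resize (or rescale) and normalize the image, then classify it
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```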
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/beit_architecture.jpg"
alt="drawing" width="600"/>
<small> BEiT pre-training. Taken from the <a href="https://arxiv.org/abs/2106.08254">original paper.</a> </small>
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BEiT.
<PipelineTag pipeline="image-classification"/>
- [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)
**Semantic segmentation**
- [Semantic segmentation task guide](../tasks/semantic_segmentation)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## BEiT specific outputs
[[autodoc]] models.beit.modeling_beit.BeitModelOutputWithPooling
[[autodoc]] models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling
## BeitConfig
[[autodoc]] BeitConfig
## BeitFeatureExtractor
[[autodoc]] BeitFeatureExtractor
- __call__
- post_process_semantic_segmentation
## BeitImageProcessor
[[autodoc]] BeitImageProcessor
- preprocess
- post_process_semantic_segmentation
<frameworkcontent>
<pt>
## BeitModel
[[autodoc]] BeitModel
- forward
## BeitForMaskedImageModeling
[[autodoc]] BeitForMaskedImageModeling
- forward
## BeitForImageClassification
[[autodoc]] BeitForImageClassification
- forward
## BeitForSemanticSegmentation
[[autodoc]] BeitForSemanticSegmentation
- forward
</pt>
<jax>
## FlaxBeitModel
[[autodoc]] FlaxBeitModel
- __call__
## FlaxBeitForMaskedImageModeling
[[autodoc]] FlaxBeitForMaskedImageModeling
- __call__
## FlaxBeitForImageClassification
[[autodoc]] FlaxBeitForImageClassification
- __call__
</jax>
</frameworkcontent> | transformers/docs/source/en/model_doc/beit.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/beit.md",
"repo_id": "transformers",
"token_count": 2186
} | 235 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# CPM
## Overview
The CPM model was proposed in [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin,
Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen,
Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
The abstract from the paper is the following:
*Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3,
with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even
zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus
of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the
Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best
of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained
language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation,
cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many
NLP tasks in the settings of few-shot (even zero-shot) learning.*
This model was contributed by [canwenxu](https://huggingface.co/canwenxu). The original implementation can be found
[here](https://github.com/TsinghuaAI/CPM-Generate).
<Tip>
CPM's architecture is the same as GPT-2, except for the tokenization method. Refer to the [GPT-2 documentation](gpt2) for
API reference information.
</Tip>
## CpmTokenizer
[[autodoc]] CpmTokenizer
## CpmTokenizerFast
[[autodoc]] CpmTokenizerFast
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FLAN-T5
## Overview
FLAN-T5 was released in the paper [Scaling Instruction-Finetuned Language Models](https://arxiv.org/pdf/2210.11416.pdf) - it is an enhanced version of T5 that has been finetuned on a mixture of tasks.
One can directly use FLAN-T5 weights without finetuning the model:
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
>>> tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
>>> inputs = tokenizer("A step by step recipe to make bolognese pasta:", return_tensors="pt")
>>> outputs = model.generate(**inputs)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['Pour a cup of bolognese into a large bowl and add the pasta']
```
FLAN-T5 includes the same improvements as T5 version 1.1 (see [here](https://huggingface.co/docs/transformers/model_doc/t5v1.1) for the full details of the model's improvements.)
Google has released the following variants:
- [google/flan-t5-small](https://huggingface.co/google/flan-t5-small)
- [google/flan-t5-base](https://huggingface.co/google/flan-t5-base)
- [google/flan-t5-large](https://huggingface.co/google/flan-t5-large)
- [google/flan-t5-xl](https://huggingface.co/google/flan-t5-xl)
- [google/flan-t5-xxl](https://huggingface.co/google/flan-t5-xxl).
The original checkpoints can be found [here](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints).
<Tip>
Refer to [T5's documentation page](t5) for all API reference, code examples and notebooks. For more details regarding training and evaluation of the FLAN-T5, refer to the model card.
</Tip> | transformers/docs/source/en/model_doc/flan-t5.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/flan-t5.md",
"repo_id": "transformers",
"token_count": 781
} | 237 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# GPT-NeoX
## Overview
We introduce GPT-NeoX-20B, a 20 billion parameter autoregressive language model trained on the Pile, whose weights will
be made freely and openly available to the public through a permissive license. It is, to the best of our knowledge,
the largest dense autoregressive model that has publicly available weights at the time of submission. In this work,
we describe GPT-NeoX-20B's architecture and training and evaluate its performance on a range of language-understanding,
mathematics, and knowledge-based tasks. We find that GPT-NeoX-20B is a particularly powerful few-shot reasoner and
gains far more in performance when evaluated five-shot than similarly sized GPT-3 and FairSeq models. We open-source
the training and evaluation code, as well as the model weights, at [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox).
Development of the model was led by Sid Black, Stella Biderman and Eric Hallahan, and the model was trained with
the generous support of [CoreWeave](https://www.coreweave.com/).
GPT-NeoX-20B was trained with fp16, thus it is recommended to initialize the model as follows:
```python
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b").half().cuda()
```
GPT-NeoX-20B also has a different tokenizer from the one used in GPT-J-6B and GPT-Neo. The new tokenizer allocates
additional tokens to whitespace characters, making the model more suitable for certain tasks like code generation.
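A small sketch of this difference, comparing against the GPT-2 tokenizer (the exact token counts depend on the snippet, so they are not shown here):
```python
from transformers import AutoTokenizer

neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
gpt2_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
code = "def hello():\n        print('Hello world')"
# the GPT-NeoX tokenizer usually needs fewer tokens for indented code,
# since runs of whitespace map to dedicated tokens
print(len(gpt2_tokenizer(code).input_ids))
print(len(neox_tokenizer(code).input_ids))
```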
## Usage example
The `generate()` method can be used to generate text using the GPT-NeoX model.
```python
>>> from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast
>>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")
>>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
>>> prompt = "GPTNeoX20B is a 20B-parameter autoregressive Transformer model developed by EleutherAI."
>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids
>>> gen_tokens = model.generate(
... input_ids,
... do_sample=True,
... temperature=0.9,
... max_length=100,
... )
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]
```
## Using Flash Attention 2
Flash Attention 2 is a faster, optimized version of the attention computation used by the model.
### Installation
First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).
Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:
```bash
pip install -U flash-attn --no-build-isolation
```
### Usage
To load a model using Flash Attention 2, we can pass the argument `attn_implementation="flash_attention_2"` to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. `torch.float16`), since it results in almost no degradation in generation quality but significantly lower memory usage and faster inference:
```python
>>> import torch
>>> from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

>>> device = "cuda"  # assumes a CUDA device is available
>>> model = GPTNeoXForCausalLM.from_pretrained(
...     "EleutherAI/gpt-neox-20b", torch_dtype=torch.float16, attn_implementation="flash_attention_2"
... ).to(device)
...
```
### Expected speedups
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `stockmark/gpt-neox-japanese-1.4b` checkpoint and the Flash Attention 2 version of the model using a sequence length of 2048.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/gpt-neox-1.8b-speedup.jpg">
</div>
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
## GPTNeoXConfig
[[autodoc]] GPTNeoXConfig
## GPTNeoXTokenizerFast
[[autodoc]] GPTNeoXTokenizerFast
## GPTNeoXModel
[[autodoc]] GPTNeoXModel
- forward
## GPTNeoXForCausalLM
[[autodoc]] GPTNeoXForCausalLM
- forward
## GPTNeoXForQuestionAnswering
[[autodoc]] GPTNeoXForQuestionAnswering
- forward
## GPTNeoXForSequenceClassification
[[autodoc]] GPTNeoXForSequenceClassification
- forward
## GPTNeoXForTokenClassification
[[autodoc]] GPTNeoXForTokenClassification
- forward
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LayoutLMv2
## Overview
The LayoutLMV2 model was proposed in [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu,
Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. LayoutLMV2 improves [LayoutLM](layoutlm) to obtain
state-of-the-art results across several document image understanding benchmarks:
- information extraction from scanned documents: the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset (a
collection of 199 annotated forms comprising more than 30,000 words), the [CORD](https://github.com/clovaai/cord)
dataset (a collection of 800 receipts for training, 100 for validation and 100 for testing), the [SROIE](https://rrc.cvc.uab.es/?ch=13) dataset (a collection of 626 receipts for training and 347 receipts for testing)
and the [Kleister-NDA](https://github.com/applicaai/kleister-nda) dataset (a collection of non-disclosure
agreements from the EDGAR database, including 254 documents for training, 83 documents for validation, and 203
documents for testing).
- document image classification: the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset (a collection of
400,000 images belonging to one of 16 classes).
- document visual question answering: the [DocVQA](https://arxiv.org/abs/2007.00398) dataset (a collection of 50,000
questions defined on 12,000+ document images).
The abstract from the paper is the following:
*Pre-training of text and layout has proved effective in a variety of visually-rich document understanding tasks due to
its effective model architecture and the advantage of large-scale unlabeled scanned/digital-born documents. In this
paper, we present LayoutLMv2 by pre-training text, layout and image in a multi-modal framework, where new model
architectures and pre-training tasks are leveraged. Specifically, LayoutLMv2 not only uses the existing masked
visual-language modeling task but also the new text-image alignment and text-image matching tasks in the pre-training
stage, where cross-modality interaction is better learned. Meanwhile, it also integrates a spatial-aware self-attention
mechanism into the Transformer architecture, so that the model can fully understand the relative positional
relationship among different text blocks. Experiment results show that LayoutLMv2 outperforms strong baselines and
achieves new state-of-the-art results on a wide variety of downstream visually-rich document understanding tasks,
including FUNSD (0.7895 -> 0.8420), CORD (0.9493 -> 0.9601), SROIE (0.9524 -> 0.9781), Kleister-NDA (0.834 -> 0.852),
RVL-CDIP (0.9443 -> 0.9564), and DocVQA (0.7295 -> 0.8672). The pre-trained LayoutLMv2 model is publicly available at
this https URL.*
LayoutLMv2 depends on `detectron2`, `torchvision` and `tesseract`. Run the
following to install them:
```bash
python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
python -m pip install torchvision tesseract
```
(If you are developing for LayoutLMv2, note that passing the doctests also requires the installation of these packages.)
## Usage tips
- The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during
pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning).
- LayoutLMv2 adds both a relative 1D attention bias as well as a spatial 2D attention bias to the attention scores in
the self-attention layers. Details can be found on page 5 of the [paper](https://arxiv.org/abs/2012.14740).
- Demo notebooks on how to use the LayoutLMv2 model on RVL-CDIP, FUNSD, DocVQA, CORD can be found [here](https://github.com/NielsRogge/Transformers-Tutorials).
- LayoutLMv2 uses Facebook AI's [Detectron2](https://github.com/facebookresearch/detectron2/) package for its visual
backbone. See [this link](https://detectron2.readthedocs.io/en/latest/tutorials/install.html) for installation
instructions.
- In addition to `input_ids`, [`~LayoutLMv2Model.forward`] expects 2 additional inputs, namely
`image` and `bbox`. The `image` input corresponds to the original document image in which the text
tokens occur. The model expects each document image to be of size 224x224. This means that if you have a batch of
document images, `image` should be a tensor of shape (batch_size, 3, 224, 224). This can be either a
`torch.Tensor` or a `Detectron2.structures.ImageList`. You don't need to normalize the channels, as this is
done by the model. Important to note is that the visual backbone expects BGR channels instead of RGB, as all models
in Detectron2 are pre-trained using the BGR format. The `bbox` input are the bounding boxes (i.e. 2D-positions)
of the input text tokens. This is identical to [`LayoutLMModel`]. These can be obtained using an
external OCR engine such as Google's [Tesseract](https://github.com/tesseract-ocr/tesseract) (there's a [Python
wrapper](https://pypi.org/project/pytesseract/) available). Each bounding box should be in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1)
represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on
a 0-1000 scale. To normalize, you can use the following function:
```python
def normalize_bbox(bbox, width, height):
return [
int(1000 * (bbox[0] / width)),
int(1000 * (bbox[1] / height)),
int(1000 * (bbox[2] / width)),
int(1000 * (bbox[3] / height)),
]
```
Here, `width` and `height` correspond to the width and height of the original document in which the token
occurs (before resizing the image). Those can be obtained using the Python Image Library (PIL) library for example, as
follows:
```python
from PIL import Image
image = Image.open(
"name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)."
)
width, height = image.size
```
However, this model includes a brand new [`~transformers.LayoutLMv2Processor`] which can be used to directly
prepare data for the model (including applying OCR under the hood). More information can be found in the "Usage"
section below.
- Internally, [`~transformers.LayoutLMv2Model`] will send the `image` input through its visual backbone to
  obtain a lower-resolution feature map, whose shape is equal to the `image_feature_pool_shape` attribute of
  [`~transformers.LayoutLMv2Config`]. This feature map is then flattened to obtain a sequence of image tokens. As
  the size of the feature map is 7x7 by default, one obtains 49 image tokens. These are then concatenated with the text
  tokens, and sent through the Transformer encoder. This means that the last hidden states of the model will have a
  length of 512 + 49 = 561, if you pad the text tokens up to the max length. More generally, the last hidden states
  will have a sequence length of `seq_length` + `config.image_feature_pool_shape[0]` *
  `config.image_feature_pool_shape[1]` (a short sketch verifying this is shown after these tips).
- When calling [`~transformers.LayoutLMv2Model.from_pretrained`], a warning will be printed with a long list of
parameter names that are not initialized. This is not a problem, as these parameters are batch normalization
statistics, which are going to have values when fine-tuning on a custom dataset.
- If you want to train the model in a distributed environment, make sure to call [`synchronize_batch_norm`] on the
model in order to properly synchronize the batch normalization layers of the visual backbone.
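A short sketch verifying the sequence length described above (it assumes `encoding` was prepared with [`LayoutLMv2Processor`] as in the examples below, so that it contains the `image` input):
```python
import torch
from transformers import LayoutLMv2Model

model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")
with torch.no_grad():
    outputs = model(**encoding)
# text sequence length + 7 * 7 image tokens
print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, seq_length + 49, 768])
```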
In addition, there's LayoutXLM, which is a multilingual version of LayoutLMv2. More information can be found on
[LayoutXLM's documentation page](layoutxlm).
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LayoutLMv2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- A notebook on how to [finetune LayoutLMv2 for text-classification on RVL-CDIP dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/RVL-CDIP/Fine_tuning_LayoutLMv2ForSequenceClassification_on_RVL_CDIP.ipynb).
- See also: [Text classification task guide](../tasks/sequence_classification)
<PipelineTag pipeline="question-answering"/>
- A notebook on how to [finetune LayoutLMv2 for question-answering on DocVQA dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/DocVQA/Fine_tuning_LayoutLMv2ForQuestionAnswering_on_DocVQA.ipynb).
- See also: [Question answering task guide](../tasks/question_answering)
- See also: [Document question answering task guide](../tasks/document_question_answering)
<PipelineTag pipeline="token-classification"/>
- A notebook on how to [finetune LayoutLMv2 for token-classification on CORD dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/CORD/Fine_tuning_LayoutLMv2ForTokenClassification_on_CORD.ipynb).
- A notebook on how to [finetune LayoutLMv2 for token-classification on FUNSD dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Fine_tuning_LayoutLMv2ForTokenClassification_on_FUNSD_using_HuggingFace_Trainer.ipynb).
- See also: [Token classification task guide](../tasks/token_classification)
## Usage: LayoutLMv2Processor
The easiest way to prepare data for the model is to use [`LayoutLMv2Processor`], which internally
combines an image processor ([`LayoutLMv2ImageProcessor`]) and a tokenizer
([`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]). The image processor
handles the image modality, while the tokenizer handles the text modality. A processor combines both, which is ideal
for a multi-modal model like LayoutLMv2. Note that you can still use both separately, if you only want to handle one
modality.
```python
from transformers import LayoutLMv2ImageProcessor, LayoutLMv2TokenizerFast, LayoutLMv2Processor
image_processor = LayoutLMv2ImageProcessor() # apply_ocr is set to True by default
tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
processor = LayoutLMv2Processor(image_processor, tokenizer)
```
In short, one can provide a document image (and possibly additional data) to [`LayoutLMv2Processor`],
and it will create the inputs expected by the model. Internally, the processor first uses
[`LayoutLMv2ImageProcessor`] to apply OCR on the image to get a list of words and normalized
bounding boxes, as well as to resize the image to a given size in order to get the `image` input. The words and
normalized bounding boxes are then provided to [`LayoutLMv2Tokenizer`] or
[`LayoutLMv2TokenizerFast`], which converts them to token-level `input_ids`,
`attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide word labels to the processor,
which are turned into token-level `labels`.
[`LayoutLMv2Processor`] uses [PyTesseract](https://pypi.org/project/pytesseract/), a Python
wrapper around Google's Tesseract OCR engine, under the hood. Note that you can still use your own OCR engine of
choice, and provide the words and normalized boxes yourself. This requires initializing
[`LayoutLMv2ImageProcessor`] with `apply_ocr` set to `False`.
In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these
use cases works for both batched and non-batched inputs (we illustrate them for non-batched inputs).
**Use case 1: document image classification (training, inference) + token classification (inference), apply_ocr =
True**
This is the simplest case, in which the processor (actually the image processor) will perform OCR on the image to get
the words and normalized bounding boxes.
```python
from transformers import LayoutLMv2Processor
from PIL import Image
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
image = Image.open(
"name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)."
).convert("RGB")
encoding = processor(
image, return_tensors="pt"
) # you can also add all tokenizer parameters here such as padding, truncation
print(encoding.keys())
# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image'])
```
**Use case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False**
In case one wants to do OCR themselves, one can initialize the image processor with `apply_ocr` set to
`False`. In that case, one should provide the words and corresponding (normalized) bounding boxes themselves to
the processor.
```python
from transformers import LayoutLMv2Processor
from PIL import Image
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
image = Image.open(
"name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)."
).convert("RGB")
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(encoding.keys())
# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image'])
```
**Use case 3: token classification (training), apply_ocr=False**
For token classification tasks (such as FUNSD, CORD, SROIE, Kleister-NDA), one can also provide the corresponding word
labels in order to train a model. The processor will then convert these into token-level `labels`. By default, it
will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the
`ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can
initialize the tokenizer with `only_label_first_subword` set to `False`.
```python
from transformers import LayoutLMv2Processor
from PIL import Image
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
image = Image.open(
"name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)."
).convert("RGB")
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes
word_labels = [1, 2]
encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
print(encoding.keys())
# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'labels', 'image'])
```
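If you do want every wordpiece to carry its word's label, a minimal sketch of the `only_label_first_subword=False` variant mentioned above (reusing `image`, `words`, `boxes` and `word_labels` from the example) looks like this:
```python
from transformers import LayoutLMv2ImageProcessor, LayoutLMv2TokenizerFast, LayoutLMv2Processor

image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizer = LayoutLMv2TokenizerFast.from_pretrained(
    "microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
processor = LayoutLMv2Processor(image_processor, tokenizer)
encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# now all wordpieces of a labeled word receive that word's label instead of -100
```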
**Use case 4: visual question answering (inference), apply_ocr=True**
For visual question answering tasks (such as DocVQA), you can provide a question to the processor. By default, the
processor will apply OCR on the image, and create [CLS] question tokens [SEP] word tokens [SEP].
```python
from transformers import LayoutLMv2Processor
from PIL import Image
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
image = Image.open(
"name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)."
).convert("RGB")
question = "What's his name?"
encoding = processor(image, question, return_tensors="pt")
print(encoding.keys())
# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image'])
```
**Use case 5: visual question answering (inference), apply_ocr=False**
For visual question answering tasks (such as DocVQA), you can provide a question to the processor. If you want to
perform OCR yourself, you can provide your own words and (normalized) bounding boxes to the processor.
```python
from transformers import LayoutLMv2Processor
from PIL import Image
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
image = Image.open(
"name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)."
).convert("RGB")
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes
encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
print(encoding.keys())
# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image'])
```
## LayoutLMv2Config
[[autodoc]] LayoutLMv2Config
## LayoutLMv2FeatureExtractor
[[autodoc]] LayoutLMv2FeatureExtractor
- __call__
## LayoutLMv2ImageProcessor
[[autodoc]] LayoutLMv2ImageProcessor
- preprocess
## LayoutLMv2Tokenizer
[[autodoc]] LayoutLMv2Tokenizer
- __call__
- save_vocabulary
## LayoutLMv2TokenizerFast
[[autodoc]] LayoutLMv2TokenizerFast
- __call__
## LayoutLMv2Processor
[[autodoc]] LayoutLMv2Processor
- __call__
## LayoutLMv2Model
[[autodoc]] LayoutLMv2Model
- forward
## LayoutLMv2ForSequenceClassification
[[autodoc]] LayoutLMv2ForSequenceClassification
## LayoutLMv2ForTokenClassification
[[autodoc]] LayoutLMv2ForTokenClassification
## LayoutLMv2ForQuestionAnswering
[[autodoc]] LayoutLMv2ForQuestionAnswering