The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.
Running 1 job
0it [00:00, ?it/s]
/usr/local/lib/python3.10/dist-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.21 (you have 1.4.15). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.
/usr/local/lib/python3.10/dist-packages/controlnet_aux/mediapipe_face/mediapipe_face_common.py:7: UserWarning: The module 'mediapipe' is not installed. The package will have limited functionality. Please install it using the command: pip install 'mediapipe'
/usr/local/lib/python3.10/dist-packages/timm/models/layers/__init__.py:48: FutureWarning: Importing from timm.models.layers is deprecated, please import via timm.layers
/usr/local/lib/python3.10/dist-packages/timm/models/registry.py:4: FutureWarning: Importing from timm.models.registry is deprecated, please import via timm.models
/usr/local/lib/python3.10/dist-packages/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py:654: UserWarning: Overwriting tiny_vit_5m_224 in registry with controlnet_aux.segment_anything.modeling.tiny_vit_sam.tiny_vit_5m_224. This is because the name being registered conflicts with an existing name. Please check if this is not expected. (The same warning repeats for tiny_vit_11m_224, tiny_vit_21m_224, tiny_vit_21m_384, and tiny_vit_21m_512.)
/workspace/ai-toolkit/extensions_built_in/sd_trainer/SDTrainer.py:61: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.
  self.scaler = torch.cuda.amp.GradScaler()
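None of these warnings affect the run. The GradScaler FutureWarning originates in ai-toolkit's SDTrainer.py, not in this training config, and the warning text itself names the replacement API. A minimal sketch of the fix, assuming PyTorch 2.3+ where `torch.amp.GradScaler` accepts the device as its first argument:

```python
import torch

# Deprecated spelling -- still functional, but emits the FutureWarning above:
# scaler = torch.cuda.amp.GradScaler()

# Replacement spelling given in the warning text (torch.amp, device-first):
scaler = torch.amp.GradScaler("cuda")
```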
You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers
{
  "type": "sd_trainer",
  "training_folder": "output",
  "device": "cuda:0",
  "network": {
    "type": "lora",
    "linear": 16,
    "linear_alpha": 16
  },
  "save": {
    "dtype": "float16",
    "save_every": 400,
    "max_step_saves_to_keep": 4,
    "push_to_hub": false
  },
  "datasets": [
    {
      "folder_path": "/workspace/ai-toolkit/images",
      "caption_ext": "txt",
      "caption_dropout_rate": 0.05,
      "shuffle_tokens": false,
      "cache_latents_to_disk": true,
      "resolution": [512, 768, 1024]
    }
  ],
  "train": {
    "batch_size": 1,
    "steps": 2000,
    "gradient_accumulation_steps": 1,
    "train_unet": true,
    "train_text_encoder": false,
    "gradient_checkpointing": true,
    "noise_scheduler": "flowmatch",
    "optimizer": "adamw8bit",
    "lr": 0.0004,
    "ema_config": {
      "use_ema": true,
      "ema_decay": 0.99
    },
    "dtype": "bf16"
  },
  "model": {
    "name_or_path": "black-forest-labs/FLUX.1-dev",
    "is_flux": true,
    "quantize": true
  },
  "sample": {
    "sampler": "flowmatch",
    "sample_every": 400,
    "width": 1024,
    "height": 1024,
    "prompts": [
      "Photo of p3r5on holding a sign that says 'I LOVE PROMPTS!'",
      "Professional headshot of p3r5on in a business suit.",
      "A happy pilot p3r5on of a Boeing 747.",
      "A doctor p3r5on talking to a patient.",
      "A chef p3r5on in the middle of a bustling kitchen, plating a beautifully arranged dish.",
      "A young p3r5on with a big grin, holding a large ice cream cone in front of an old-fashioned ice cream parlor.",
      "A person p3r5on in a tuxedo, looking directly into the camera with a confident smile, standing on a red carpet at a gala event.",
      "Person p3r5on with a bitchin' 80's mullet hairstyle leaning out the window of a pontiac firebird"
    ],
    "neg": "",
    "seed": 42,
    "walk_seed": true,
    "guidance_scale": 4,
    "sample_steps": 20
  },
  "trigger_word": "p3r5on"
}
Using EMA
#############################################
# Running job: my_first_flux_lora_v1
#############################################
Running 1 process
Loading Flux model
Loading transformer
Quantizing transformer
Loading vae
Loading t5
Downloading shards: 0%| | 0/2 [00:00
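Stepping back from the transcript: the `Using EMA` line corresponds to `ema_config` in the dump above. Alongside the live LoRA weights, the trainer maintains an exponential moving average of them with decay 0.99, which smooths step-to-step noise in the saved checkpoints and samples. A minimal sketch of the standard EMA update rule (illustrative only; the function and variable names here are not ai-toolkit's internals):

```python
import torch

def ema_update(ema_params, model_params, decay=0.99):
    """Standard EMA step: ema <- decay * ema + (1 - decay) * current."""
    with torch.no_grad():
        for ema_p, p in zip(ema_params, model_params):
            ema_p.mul_(decay).add_(p, alpha=1.0 - decay)
```

With decay = 0.99, each update keeps 99% of the running average, so the EMA weights track roughly the last 1 / (1 - 0.99) = 100 optimizer steps.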