<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ๐Ÿงจ Diffusers training examples

In this chapter we look at example code for a variety of use cases and show how to use the `diffusers` library effectively.

**Note**: If you are looking for the official example code, take a look [here](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)!

The examples covered here aim to be:

- **Self-contained**: every dependency used by the example code can be installed with a plain `pip install` command. The dependencies are also conveniently listed in a `requirements.txt` file, so you can install all of them at once with `pip install -r requirements.txt`. Example: [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt)
- **Easy-to-tweak**: we try to cover as many use cases as possible, but please remember that examples are, in the end, just examples. Simply copy-pasting the code provided here will rarely solve the exact problem you are facing; you will likely need to adapt parts of the code to your own situation and needs. To help with this, most training examples expose both the data preprocessing and the training loop, so you can easily modify them as needed.
- **Beginner-friendly**: this chapter is written to build an overall understanding of diffusion models and the `diffusers` library. We therefore deliberately leave out state-of-the-art (SOTA) methods that we judge to be too difficult for newcomers.
- **One-purpose-only**: each example should cover only a single task. Even though tasks such as image super-resolution and image modification share a similar modeling process, we found that keeping one task per example makes them easier to understand.

We provide official examples that cover the most representative tasks for diffusion models.
*๊ณต์‹* ์˜ˆ์ œ๋Š” ํ˜„์žฌ ์ง„ํ–‰ํ˜•์œผ๋กœ `diffusers` ๊ด€๋ฆฌ์ž๋“ค(maintainers)์— ์˜ํ•ด ๊ด€๋ฆฌ๋˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ์ €ํฌ๋Š” ์•ž์„œ ์ •์˜ํ•œ ์ €ํฌ์˜ ์ฒ ํ•™์„ ์—„๊ฒฉํ•˜๊ฒŒ ๋”ฐ๋ฅด๊ณ ์ž ๋…ธ๋ ฅํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ํ˜น์‹œ ์—ฌ๋Ÿฌ๋ถ„๊ป˜์„œ ์ด๋Ÿฌํ•œ ์˜ˆ์‹œ๊ฐ€ ๋ฐ˜๋“œ์‹œ ํ•„์š”ํ•˜๋‹ค๊ณ  ์ƒ๊ฐ๋˜์‹ ๋‹ค๋ฉด, ์–ธ์ œ๋“ ์ง€ [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) ํ˜น์€ ์ง์ ‘ [Pull Request](https://github.com/huggingface/diffusers/compare)๋ฅผ ์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์ €ํฌ๋Š” ์–ธ์ œ๋‚˜ ํ™˜์˜์ž…๋‹ˆ๋‹ค! ํ•™์Šต ์˜ˆ์‹œ๋“ค์€ ๋‹ค์–‘ํ•œ ํƒœ์Šคํฌ๋“ค์— ๋Œ€ํ•ด diffusion ๋ชจ๋ธ์„ ์‚ฌ์ „ํ•™์Šต(pretrain)ํ•˜๊ฑฐ๋‚˜ ํŒŒ์ธํŠœ๋‹(fine-tuning)ํ•˜๋Š” ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. ํ˜„์žฌ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์˜ˆ์ œ๋“ค์„ ์ง€์›ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. - [Unconditional Training](./unconditional_training) - [Text-to-Image Training](./text2image) - [Text Inversion](./text_inversion) - [Dreambooth](./dreambooth) memory-efficient attention ์—ฐ์‚ฐ์„ ์ˆ˜ํ–‰ํ•˜๊ธฐ ์œ„ํ•ด, ๊ฐ€๋Šฅํ•˜๋ฉด [xFormers](../optimization/xformers)๋ฅผ ์„ค์น˜ํ•ด์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ํ•™์Šต ์†๋„๋ฅผ ๋Š˜๋ฆฌ๊ณ  ๋ฉ”๋ชจ๋ฆฌ์— ๋Œ€ํ•œ ๋ถ€๋‹ด์„ ์ค„์ผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. | Task | ๐Ÿค— Accelerate | ๐Ÿค— Datasets | Colab |---|---|:---:|:---:| | [**Unconditional Image Generation**](./unconditional_training) | โœ… | โœ… | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | [**Text-to-Image fine-tuning**](./text2image) | โœ… | โœ… | | [**Textual Inversion**](./text_inversion) | โœ… | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | [**Dreambooth**](./dreambooth) | โœ… | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | [**Training with LoRA**](./lora) | โœ… | - | - | | [**ControlNet**](./controlnet) | โœ… | โœ… | - | | [**InstructPix2Pix**](./instructpix2pix) | โœ… | โœ… | - | | [**Custom Diffusion**](./custom_diffusion) | โœ… | โœ… | - | ## ์ปค๋ฎค๋‹ˆํ‹ฐ ๊ณต์‹ ์˜ˆ์ œ ์™ธ์—๋„ **์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ œ** ์—ญ์‹œ ์ œ๊ณตํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ํ•ด๋‹น ์˜ˆ์ œ๋“ค์€ ์šฐ๋ฆฌ์˜ ์ปค๋ฎค๋‹ˆํ‹ฐ์— ์˜ํ•ด ๊ด€๋ฆฌ๋ฉ๋‹ˆ๋‹ค. ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ฉจ๋Š” ํ•™์Šต ์˜ˆ์‹œ๋‚˜ ์ถ”๋ก  ํŒŒ์ดํ”„๋ผ์ธ์œผ๋กœ ๊ตฌ์„ฑ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์‹œ๋“ค์˜ ๊ฒฝ์šฐ, ์•ž์„œ ์ •์˜ํ–ˆ๋˜ ์ฒ ํ•™๋“ค์„ ์ข€ ๋” ๊ด€๋Œ€ํ•˜๊ฒŒ ์ ์šฉํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ์ด๋Ÿฌํ•œ ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์‹œ๋“ค์˜ ๊ฒฝ์šฐ, ๋ชจ๋“  ์ด์Šˆ๋“ค์— ๋Œ€ํ•œ ์œ ์ง€๋ณด์ˆ˜๋ฅผ ๋ณด์žฅํ•  ์ˆ˜๋Š” ์—†์Šต๋‹ˆ๋‹ค. ์œ ์šฉํ•˜๊ธด ํ•˜์ง€๋งŒ, ์•„์ง์€ ๋Œ€์ค‘์ ์ด์ง€ ๋ชปํ•˜๊ฑฐ๋‚˜ ์ €ํฌ์˜ ์ฒ ํ•™์— ๋ถ€ํ•ฉํ•˜์ง€ ์•Š๋Š” ์˜ˆ์ œ๋“ค์€ [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) ํด๋”์— ๋‹ด๊ธฐ๊ฒŒ ๋ฉ๋‹ˆ๋‹ค. **Note**: ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ œ๋Š” `diffusers`์— ๊ธฐ์—ฌ(contribution)๋ฅผ ํฌ๋งํ•˜๋Š” ๋ถ„๋“ค์—๊ฒŒ [์•„์ฃผ ์ข‹์€ ๊ธฐ์—ฌ ์ˆ˜๋‹จ](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)์ด ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
## Important notes

To make sure you can successfully run the latest versions of the example scripts, you must **install `diffusers` from source** and install the example-specific dependencies as well. To do this, create a new virtual environment and run:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then `cd` into the directory of the example you want to run and execute:

```bash
pip install -r requirements.txt
```
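With `diffusers` installed from source and the example requirements in place, the snippet below is a minimal sketch of how the xFormers memory-efficient attention mentioned earlier is typically enabled on a loaded model. It assumes the `xformers` package is installed, and the checkpoint id is only an illustrative assumption, not a requirement of these examples.

```python
# Minimal sketch (illustrative, not part of the example scripts above):
# enable xFormers memory-efficient attention on a loaded UNet.
import torch
from diffusers import UNet2DConditionModel

# The checkpoint id below is an assumption used only for illustration.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    subfolder="unet",
    torch_dtype=torch.float16,
)

# Requires the `xformers` package to be installed.
unet.enable_xformers_memory_efficient_attention()
```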
diffusers/docs/source/ko/training/overview.md/0
{ "file_path": "diffusers/docs/source/ko/training/overview.md", "repo_id": "diffusers", "token_count": 4744 }
99
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Overview

๐Ÿงจ Diffusers offers many pipelines, models, and schedulers for generative tasks. To make loading these components as simple as possible, we provide a single, unified method, `from_pretrained()`, which loads any of these components from either the Hugging Face [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) or your local machine. Whenever you load a pipeline or model, the latest files are automatically downloaded and cached, so you can quickly reuse them later without downloading the files again.

This section covers everything you need to know about loading pipelines, loading different components of a pipeline, loading checkpoint variants, and loading community pipelines. It also covers how to load schedulers and how to compare the speed-versus-quality trade-offs that come with using different schedulers. Finally, it shows how to convert and load KerasCV checkpoints so you can use them in PyTorch with ๐Ÿงจ Diffusers.
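To make the loading workflow above concrete, here is a minimal sketch of loading a pipeline with `from_pretrained()` and swapping its scheduler. The checkpoint id, the `fp16` variant, and the scheduler choice are assumptions for illustration rather than recommendations from this overview.

```python
# Minimal sketch: load a pipeline from the Hub (files are downloaded once and cached),
# then swap the scheduler to explore speed/quality trade-offs.
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

# The checkpoint id and the fp16 variant below are illustrative assumptions.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = pipe.to("cuda")  # assumes a CUDA GPU is available

# Reuse the existing scheduler config to construct a different scheduler.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

image = pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images[0]
image.save("astronaut.png")
```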
diffusers/docs/source/ko/using-diffusers/loading_overview.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/loading_overview.md", "repo_id": "diffusers", "token_count": 1157 }
100
- sections:
  - local: index
    title: ๐Ÿงจ Diffusers
  - local: quicktour
    title: ๅฟซ้€Ÿๅ…ฅ้—จ
  - local: stable_diffusion
    title: ๆœ‰ๆ•ˆๅ’Œ้ซ˜ๆ•ˆ็š„ๆ‰ฉๆ•ฃ
  - local: installation
    title: ๅฎ‰่ฃ…
  title: ๅผ€ๅง‹
diffusers/docs/source/zh/_toctree.yml/0
{ "file_path": "diffusers/docs/source/zh/_toctree.yml", "repo_id": "diffusers", "token_count": 100 }
101
import inspect from typing import List, Optional, Union import torch from torch import nn from torch.nn import functional as F from torchvision import transforms from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput class MakeCutouts(nn.Module): def __init__(self, cut_size, cut_power=1.0): super().__init__() self.cut_size = cut_size self.cut_power = cut_power def forward(self, pixel_values, num_cutouts): sideY, sideX = pixel_values.shape[2:4] max_size = min(sideX, sideY) min_size = min(sideX, sideY, self.cut_size) cutouts = [] for _ in range(num_cutouts): size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size) offsetx = torch.randint(0, sideX - size + 1, ()) offsety = torch.randint(0, sideY - size + 1, ()) cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size] cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size)) return torch.cat(cutouts) def spherical_dist_loss(x, y): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def set_requires_grad(model, value): for param in model.parameters(): param.requires_grad = value class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin): """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000 - https://github.com/Jack000/glid-3-xl - https://github.dev/crowsonkb/k-diffusion """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPImageProcessor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) self.cut_out_size = ( feature_extractor.size if isinstance(feature_extractor.size, int) else feature_extractor.size["shortest_edge"] ) self.make_cutouts = MakeCutouts(self.cut_out_size) set_requires_grad(self.text_encoder, False) set_requires_grad(self.clip_model, False) def freeze_vae(self): set_requires_grad(self.vae, False) def unfreeze_vae(self): set_requires_grad(self.vae, True) def freeze_unet(self): set_requires_grad(self.unet, False) def unfreeze_unet(self): set_requires_grad(self.unet, True) @torch.enable_grad() def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, text_embeddings_clip, clip_guidance_scale, num_cutouts, use_cutouts=True, ): latents = latents.detach().requires_grad_() latent_model_input = self.scheduler.scale_model_input(latents, timestep) # predict the noise residual noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula 
(12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) fac = torch.sqrt(beta_prod_t) sample = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, LMSDiscreteScheduler): sigma = self.scheduler.sigmas[index] sample = latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler)} not supported") sample = 1 / self.vae.config.scaling_factor * sample image = self.vae.decode(sample).sample image = (image / 2 + 0.5).clamp(0, 1) if use_cutouts: image = self.make_cutouts(image, num_cutouts) else: image = transforms.Resize(self.cut_out_size)(image) image = self.normalize(image).to(latents.dtype) image_embeddings_clip = self.clip_model.get_image_features(image) image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) if use_cutouts: dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip) dists = dists.view([num_cutouts, sample.shape[0], -1]) loss = dists.sum(2).mean(0).sum() * clip_guidance_scale else: loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale grads = -torch.autograd.grad(loss, latents)[0] if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents.detach() + grads * (sigma**2) noise_pred = noise_pred_original else: noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads return noise_pred, latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = 512, width: Optional[int] = 512, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, clip_prompt: Optional[Union[str, List[str]]] = None, num_cutouts: Optional[int] = 4, use_cutouts: Optional[bool] = True, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # get prompt text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) if clip_guidance_scale > 0: if clip_prompt is not None: clip_text_input = self.tokenizer( clip_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ).input_ids.to(self.device) else: clip_text_input = text_input.input_ids.to(self.device) text_embeddings_clip = self.clip_model.get_text_features(clip_text_input) text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True) # duplicate text embeddings clip for each generation per prompt text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: 
https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: max_length = text_input.input_ids.shape[-1] uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt") uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform classifier free guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: text_embeddings_for_guidance = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) noise_pred, latents = self.cond_fn( latents, t, i, text_embeddings_for_guidance, noise_pred, text_embeddings_clip, clip_guidance_scale, num_cutouts, use_cutouts, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # scale and decode the image latents with vae latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
diffusers/examples/community/clip_guided_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/clip_guided_stable_diffusion.py", "repo_id": "diffusers", "token_count": 6484 }
102
# Copyright 2024 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a single file implementation of LMD+. See README.md for examples. import ast import gc import inspect import math import warnings from collections.abc import Iterable from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.attention import Attention, GatedSelfAttentionDense from diffusers.models.attention_processor import AttnProcessor2_0 from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines import DiffusionPipeline from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained( ... "longlian/lmd_plus", ... custom_pipeline="llm_grounded_diffusion", ... custom_revision="main", ... variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe.enable_model_cpu_offload() >>> # Generate an image described by the prompt and >>> # insert objects described by text at the region defined by bounding boxes >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage" >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]] >>> phrases = ["a waterfall", "a modern high speed train"] >>> images = pipe( ... prompt=prompt, ... phrases=phrases, ... boxes=boxes, ... gligen_scheduled_sampling_beta=0.4, ... output_type="pil", ... num_inference_steps=50, ... lmd_guidance_kwargs={} ... ).images >>> images[0].save("./lmd_plus_generation.jpg") >>> # Generate directly from a text prompt and an LLM response >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage" >>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\""" [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])] Background prompt: A beautiful forest with fall foliage Negative prompt: \""") >> images = pipe( ... prompt=prompt, ... 
negative_prompt=neg_prompt, ... phrases=phrases, ... boxes=boxes, ... gligen_scheduled_sampling_beta=0.4, ... output_type="pil", ... num_inference_steps=50, ... lmd_guidance_kwargs={} ... ).images >>> images[0].save("./lmd_plus_generation.jpg") images[0] ``` """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name # All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)] # Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`. DEFAULT_GUIDANCE_ATTN_KEYS = [ ("mid", 0, 0, 0), ("up", 1, 0, 0), ("up", 1, 1, 0), ("up", 1, 2, 0), ] def convert_attn_keys(key): """Convert the attention key from tuple format to the torch state format""" if key[0] == "mid": assert key[1] == 0, f"mid block only has one block but the index is {key[1]}" return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor" return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor" DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS] def scale_proportion(obj_box, H, W): # Separately rounding box_w and box_h to allow shift invariant box sizes. Otherwise box sizes may change when both coordinates being rounded end with ".5". x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H) box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H) x_max, y_max = x_min + box_w, y_min + box_h x_min, y_min = max(x_min, 0), max(y_min, 0) x_max, y_max = min(x_max, W), min(y_max, H) return x_min, y_min, x_max, y_max # Adapted from the parent class `AttnProcessor2_0` class AttnProcessorWithHook(AttnProcessor2_0): def __init__( self, attn_processor_key, hidden_size, cross_attention_dim, hook=None, fast_attn=True, enabled=True, ): super().__init__() self.attn_processor_key = attn_processor_key self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.hook = hook self.fast_attn = fast_attn self.enabled = enabled def __call__( self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, scale: float = 1.0, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) args = () if USE_PEFT_BACKEND else (scale,) query = attn.to_q(hidden_states, *args) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states, *args) value = attn.to_v(encoder_hidden_states, *args) 
inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads if (self.hook is not None and self.enabled) or not self.fast_attn: query_batch_dim = attn.head_to_batch_dim(query) key_batch_dim = attn.head_to_batch_dim(key) value_batch_dim = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask) if self.hook is not None and self.enabled: # Call the hook with query, key, value, and attention maps self.hook( self.attn_processor_key, query_batch_dim, key_batch_dim, value_batch_dim, attention_probs, ) if self.fast_attn: query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attention_mask is not None: # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) else: hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LLMGroundedDiffusionPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, ): r""" Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://arxiv.org/pdf/2305.13655.pdf. This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control. Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. 
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. requires_safety_checker (bool): Whether a safety checker is needed for this pipeline. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] objects_text = "Objects: " bg_prompt_text = "Background prompt: " bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip() neg_prompt_text = "Negative prompt: " neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip() def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): # This is copied from StableDiffusionPipeline, with hook initizations for LMD+. super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Initialize the attention hooks for LLM-grounded Diffusion self.register_attn_hooks(unet) self._saved_attn = None def attn_hook(self, name, query, key, value, attention_probs): if name in DEFAULT_GUIDANCE_ATTN_KEYS: self._saved_attn[name] = attention_probs @classmethod def convert_box(cls, box, height, width): # box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max x_min, y_min = box[0] / width, box[1] / height w_box, h_box = box[2] / width, box[3] / height x_max, y_max = x_min + w_box, y_min + h_box return x_min, y_min, x_max, y_max @classmethod def _parse_response_with_negative(cls, text): if not text: raise ValueError("LLM response is empty") if cls.objects_text in text: text = text.split(cls.objects_text)[1] text_split = text.split(cls.bg_prompt_text_no_trailing_space) if len(text_split) == 2: gen_boxes, text_rem = text_split else: raise ValueError(f"LLM response is incomplete: {text}") text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space) if len(text_split) == 2: bg_prompt, neg_prompt = text_split else: raise ValueError(f"LLM response is incomplete: {text}") try: gen_boxes = ast.literal_eval(gen_boxes) except SyntaxError as e: # Sometimes the response is in plain text if "No objects" in gen_boxes or gen_boxes.strip() == "": gen_boxes = [] else: raise e bg_prompt = bg_prompt.strip() neg_prompt = neg_prompt.strip() # LLM may return "None" to mean no negative prompt provided. 
if neg_prompt == "None": neg_prompt = "" return gen_boxes, bg_prompt, neg_prompt @classmethod def parse_llm_response(cls, response, canvas_height=512, canvas_width=512): # Infer from spec gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response) gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0]) phrases = [name for name, _ in gen_boxes] boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes] return phrases, boxes, bg_prompt, neg_prompt def check_inputs( self, prompt, height, width, callback_steps, phrases, boxes, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, phrase_indices=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt is None and phrase_indices is None: raise ValueError("If the prompt is None, the phrase_indices cannot be None") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if len(phrases) != len(boxes): ValueError( "length of `phrases` and `boxes` has to be same, but" f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}" ) def register_attn_hooks(self, unet): """Registering hooks to obtain the attention maps for guidance""" attn_procs = {} for name in unet.attn_processors.keys(): # Only obtain the queries and keys from cross-attention if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"): # Keep the same attn_processors for self-attention (no hooks for self-attention) attn_procs[name] = unet.attn_processors[name] continue cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] attn_procs[name] = AttnProcessorWithHook( attn_processor_key=name, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, hook=self.attn_hook, fast_attn=True, # Not enabled by default enabled=False, ) unet.set_attn_processor(attn_procs) def enable_fuser(self, enabled=True): for module in self.unet.modules(): if isinstance(module, GatedSelfAttentionDense): module.enabled = enabled def enable_attn_hook(self, enabled=True): for module in self.unet.attn_processors.values(): if isinstance(module, AttnProcessorWithHook): module.enabled = enabled def get_token_map(self, prompt, padding="do_not_pad", verbose=False): """Get a list of mapping: prompt index to str (prompt in a list of token str)""" fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np") input_ids = fg_prompt_tokens["input_ids"][0] token_map = [] for ind, item in enumerate(input_ids.tolist()): token = self.tokenizer._convert_id_to_token(item) if verbose: logger.info(f"{ind}, {token} ({item})") token_map.append(token) return token_map def get_phrase_indices( self, prompt, phrases, token_map=None, add_suffix_if_not_found=False, verbose=False, ): for obj in phrases: # Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix if obj not in prompt: prompt += "| " + obj if token_map is None: # We allow using a pre-computed token map. token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose) token_map_str = " ".join(token_map) phrase_indices = [] for obj in phrases: phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose) # Remove <bos> and <eos> in substr phrase_token_map = phrase_token_map[1:-1] phrase_token_map_len = len(phrase_token_map) phrase_token_map_str = " ".join(phrase_token_map) if verbose: logger.info( "Full str:", token_map_str, "Substr:", phrase_token_map_str, "Phrase:", phrases, ) # Count the number of token before substr # The substring comes with a trailing space that needs to be removed by minus one in the index. 
obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" ")) obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len)) phrase_indices.append(obj_position) if add_suffix_if_not_found: return phrase_indices, prompt return phrase_indices def add_ca_loss_per_attn_map_to_loss( self, loss, attn_map, object_number, bboxes, phrase_indices, fg_top_p=0.2, bg_top_p=0.2, fg_weight=1.0, bg_weight=1.0, ): # b is the number of heads, not batch b, i, j = attn_map.shape H = W = int(math.sqrt(i)) for obj_idx in range(object_number): obj_loss = 0 mask = torch.zeros(size=(H, W), device="cuda") obj_boxes = bboxes[obj_idx] # We support two level (one box per phrase) and three level (multiple boxes per phrase) if not isinstance(obj_boxes[0], Iterable): obj_boxes = [obj_boxes] for obj_box in obj_boxes: # x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H) x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W) mask[y_min:y_max, x_min:x_max] = 1 for obj_position in phrase_indices[obj_idx]: # Could potentially optimize to compute this for loop in batch. # Could crop the ref cross attention before saving to save memory. ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W) # shape: (b, H * W) ca_map_obj = attn_map[:, :, obj_position] # .reshape(b, H, W) k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1) k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1) mask_1d = mask.view(1, -1) # Max-based loss function # Take the topk over spatial dimension, and then take the sum over heads dim # The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own. obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight loss += obj_loss / len(phrase_indices[obj_idx]) return loss def compute_ca_loss( self, saved_attn, bboxes, phrase_indices, guidance_attn_keys, verbose=False, **kwargs, ): """ The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing ths loss. `AttnProcessor` will put attention maps into the `save_attn_to_dict`. `index` is the timestep. `ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from word rather than the last token). `ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens. """ loss = torch.tensor(0).float().cuda() object_number = len(bboxes) if object_number == 0: return loss for attn_key in guidance_attn_keys: # We only have 1 cross attention for mid. 
attn_map_integrated = saved_attn[attn_key] if not attn_map_integrated.is_cuda: attn_map_integrated = attn_map_integrated.cuda() # Example dimension: [20, 64, 77] attn_map = attn_map_integrated.squeeze(dim=0) loss = self.add_ca_loss_per_attn_map_to_loss( loss, attn_map, object_number, bboxes, phrase_indices, **kwargs ) num_attn = len(guidance_attn_keys) if num_attn > 0: loss = loss / (object_number * num_attn) return loss @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, gligen_scheduled_sampling_beta: float = 0.3, phrases: List[str] = None, boxes: List[List[float]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, lmd_guidance_kwargs: Optional[Dict[str, Any]] = {}, phrase_indices: Optional[List[int]] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. phrases (`List[str]`): The phrases to guide what to include in each of the regions defined by the corresponding `boxes`. There should only be one phrase per bounding box. boxes (`List[List[float]]`): The bounding boxes that identify rectangular regions of the image that are going to be filled with the content described by the corresponding `phrases`. Each rectangular box is defined as a `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. gligen_scheduled_sampling_beta (`float`, defaults to 0.3): Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for scheduled sampling during inference for improved quality and controllability. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. lmd_guidance_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to `latent_lmd_guidance` function. Useful keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance` for implementation details. phrase_indices (`list` of `list`, *optional*): The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match the first token subsequence. 
The pipeline will append the missing phrases to the end of the prompt by default. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, phrases, boxes, negative_prompt, prompt_embeds, negative_prompt_embeds, phrase_indices, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 if phrase_indices is None: phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True) elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) if phrase_indices is None: phrase_indices = [] prompt_parsed = [] for prompt_item in prompt: ( phrase_indices_parsed_item, prompt_parsed_item, ) = self.get_phrase_indices(prompt_item, add_suffix_if_not_found=True) phrase_indices.append(phrase_indices_parsed_item) prompt_parsed.append(prompt_parsed_item) prompt = prompt_parsed else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) cond_prompt_embeds = prompt_embeds # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None: image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 5.1 Prepare GLIGEN variables max_objs = 30 if len(boxes) > max_objs: warnings.warn( f"More that {max_objs} objects found. 
Only first {max_objs} objects will be processed.", FutureWarning, ) phrases = phrases[:max_objs] boxes = boxes[:max_objs] n_objs = len(boxes) if n_objs: # prepare batched input to the PositionNet (boxes, phrases, mask) # Get tokens for phrases from pre-trained CLIPTokenizer tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device) # For the token, we use the same pre-trained text encoder # to obtain its text feature _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output # For each entity, described in phrases, is denoted with a bounding box, # we represent the location information as (xmin,ymin,xmax,ymax) cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) if n_objs: cond_boxes[:n_objs] = torch.tensor(boxes) text_embeddings = torch.zeros( max_objs, self.unet.config.cross_attention_dim, device=device, dtype=self.text_encoder.dtype, ) if n_objs: text_embeddings[:n_objs] = _text_embeddings # Generate a mask for each object that is entity described by phrases masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) masks[:n_objs] = 1 repeat_batch = batch_size * num_images_per_prompt cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone() text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone() masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone() if do_classifier_free_guidance: repeat_batch = repeat_batch * 2 cond_boxes = torch.cat([cond_boxes] * 2) text_embeddings = torch.cat([text_embeddings] * 2) masks = torch.cat([masks] * 2) masks[: repeat_batch // 2] = 0 if cross_attention_kwargs is None: cross_attention_kwargs = {} cross_attention_kwargs["gligen"] = { "boxes": cond_boxes, "positive_embeddings": text_embeddings, "masks": masks, } num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps)) self.enable_fuser(True) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None loss_attn = torch.tensor(10000.0) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Scheduled sampling if i == num_grounding_steps: self.enable_fuser(False) if latents.shape[1] != 4: latents = torch.randn_like(latents[:, :4]) # 7.1 Perform LMD guidance if boxes: latents, loss_attn = self.latent_lmd_guidance( cond_prompt_embeds, index=i, boxes=boxes, phrase_indices=phrase_indices, t=t, latents=latents, loss=loss_attn, **lmd_guidance_kwargs, ) # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.set_grad_enabled(True) def latent_lmd_guidance( self, cond_embeddings, index, boxes, phrase_indices, t, latents, loss, *, loss_scale=20, loss_threshold=5.0, max_iter=[3] * 5 + [2] * 5 + [1] * 5, guidance_timesteps=15, cross_attention_kwargs=None, guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS, verbose=False, clear_cache=False, unet_additional_kwargs={}, guidance_callback=None, **kwargs, ): scheduler, unet = self.scheduler, self.unet iteration = 0 if index < guidance_timesteps: if isinstance(max_iter, list): max_iter = max_iter[index] if verbose: logger.info( f"time index {index}, loss: {loss.item()/loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}" ) try: self.enable_attn_hook(enabled=True) while ( loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps ): self._saved_attn = {} latents.requires_grad_(True) latent_model_input = latents latent_model_input = scheduler.scale_model_input(latent_model_input, t) unet( latent_model_input, t, encoder_hidden_states=cond_embeddings, cross_attention_kwargs=cross_attention_kwargs, **unet_additional_kwargs, ) 
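# The attention hook enabled above (`enable_attn_hook(True)`) makes the hooked attention
# processors write their cross-attention maps into `self._saved_attn` during this forward
# pass; `compute_ca_loss` below turns those maps into a loss that encourages each phrase's
# attention to concentrate inside its corresponding bounding box.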
# update latents with guidance loss = ( self.compute_ca_loss( saved_attn=self._saved_attn, bboxes=boxes, phrase_indices=phrase_indices, guidance_attn_keys=guidance_attn_keys, verbose=verbose, **kwargs, ) * loss_scale ) if torch.isnan(loss): raise RuntimeError("**Loss is NaN**") # This callback allows visualizations. if guidance_callback is not None: guidance_callback(self, latents, loss, iteration, index) self._saved_attn = None grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0] latents.requires_grad_(False) # Scaling with classifier guidance alpha_prod_t = scheduler.alphas_cumprod[t] # Classifier guidance: https://arxiv.org/pdf/2105.05233.pdf # DDIM: https://arxiv.org/pdf/2010.02502.pdf scale = (1 - alpha_prod_t) ** (0.5) latents = latents - scale * grad_cond iteration += 1 if clear_cache: gc.collect() torch.cuda.empty_cache() if verbose: logger.info( f"time index {index}, loss: {loss.item()/loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}" ) finally: self.enable_attn_hook(enabled=False) return latents, loss # Below are methods copied from StableDiffusionPipeline # The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True, ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
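# For example, with `clip_skip=1` the indexing above selects `hidden_states[-2]`, i.e. the
# pre-final encoder layer (matching the docstring); because the encoder's last block is
# bypassed, `final_layer_norm` is applied manually below.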
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def 
run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale @property def guidance_scale(self): return self._guidance_scale # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale @property def guidance_rescale(self): return self._guidance_rescale # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs @property def cross_attention_kwargs(self): return self._cross_attention_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps @property def num_timesteps(self): return self._num_timesteps
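# Usage sketch (illustrative, not part of the original file): `phrases`, `boxes`, and
# `gligen_scheduled_sampling_beta` are the grounding arguments consumed by `__call__`
# above; the checkpoint name and the `custom_pipeline` loading path are assumptions
# shown only for orientation.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "longlian/lmd_plus",                          # assumed example checkpoint
#       custom_pipeline="llm_grounded_diffusion",     # loads this community pipeline
#       torch_dtype=torch.float16,
#   ).to("cuda")
#
#   images = pipe(
#       prompt="a waterfall and a modern high speed train in a forest",
#       phrases=["a waterfall", "a modern high speed train"],
#       boxes=[[0.30, 0.20, 0.65, 0.90], [0.10, 0.10, 0.60, 0.30]],  # (xmin, ymin, xmax, ymax), normalized
#       gligen_scheduled_sampling_beta=0.4,
#       num_inference_steps=50,
#   ).images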
# --- End of diffusers/examples/community/llm_grounded_diffusion.py ---
# --- The following code is a separate community pipeline defining `Prompt2PromptPipeline` ---
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import abc import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from packaging import version from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel from diffusers.configuration_utils import FrozenDict, deprecate from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models.attention import Attention from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import ( StableDiffusionSafetyChecker, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class Prompt2PromptPipeline( DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True, ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). The keyword arguments to configure the edit are: - edit_type (`str`). The edit type to apply. Can be either of `replace`, `refine`, `reweight`. - n_cross_replace (`int`): Number of diffusion steps in which cross attention should be replaced - n_self_replace (`int`): Number of diffusion steps in which self attention should be replaced - local_blend_words(`List[str]`, *optional*, default to `None`): Determines which area should be changed. If None, then the whole image can be changed. - equalizer_words(`List[str]`, *optional*, default to `None`): Required for edit type `reweight`. Determines which words should be enhanced. - equalizer_strengths (`List[float]`, *optional*, default to `None`) Required for edit type `reweight`. Determines which how much the words in `equalizer_words` should be enhanced. guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ self.controller = create_controller( prompt, cross_attention_kwargs, num_inference_steps, tokenizer=self.tokenizer, device=self.device, ) self.register_attention_control(self.controller) # add attention controller # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # step callback latents = self.controller.step_callback(latents) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post-processing if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None # 9. 
Run safety checker if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def register_attention_control(self, controller): attn_procs = {} cross_att_count = 0 for name in self.unet.attn_processors.keys(): (None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim) if name.startswith("mid_block"): self.unet.config.block_out_channels[-1] place_in_unet = "mid" elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) list(reversed(self.unet.config.block_out_channels))[block_id] place_in_unet = "up" elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) self.unet.config.block_out_channels[block_id] place_in_unet = "down" else: continue cross_att_count += 1 attn_procs[name] = P2PCrossAttnProcessor(controller=controller, place_in_unet=place_in_unet) self.unet.set_attn_processor(attn_procs) controller.num_att_layers = cross_att_count class P2PCrossAttnProcessor: def __init__(self, controller, place_in_unet): super().__init__() self.controller = controller self.place_in_unet = place_in_unet def __call__( self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, ): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross = encoder_hidden_states is not None encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) # one line change self.controller(attention_probs, is_cross, self.place_in_unet) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states def create_controller( prompts: List[str], cross_attention_kwargs: Dict, num_inference_steps: int, tokenizer, device, ) -> AttentionControl: edit_type = cross_attention_kwargs.get("edit_type", None) local_blend_words = cross_attention_kwargs.get("local_blend_words", None) equalizer_words = cross_attention_kwargs.get("equalizer_words", None) equalizer_strengths = cross_attention_kwargs.get("equalizer_strengths", None) n_cross_replace = cross_attention_kwargs.get("n_cross_replace", 0.4) n_self_replace = cross_attention_kwargs.get("n_self_replace", 0.4) # only replace if edit_type == "replace" and local_blend_words is None: return AttentionReplace( prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device, ) # replace + localblend if edit_type == "replace" and local_blend_words is not None: lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device) return AttentionReplace( prompts, num_inference_steps, n_cross_replace, 
n_self_replace, lb, tokenizer=tokenizer, device=device, ) # only refine if edit_type == "refine" and local_blend_words is None: return AttentionRefine( prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device, ) # refine + localblend if edit_type == "refine" and local_blend_words is not None: lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device) return AttentionRefine( prompts, num_inference_steps, n_cross_replace, n_self_replace, lb, tokenizer=tokenizer, device=device, ) # reweight if edit_type == "reweight": assert ( equalizer_words is not None and equalizer_strengths is not None ), "To use reweight edit, please specify equalizer_words and equalizer_strengths." assert len(equalizer_words) == len( equalizer_strengths ), "equalizer_words and equalizer_strengths must be of same length." equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer) return AttentionReweight( prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device, equalizer=equalizer, ) raise ValueError(f"Edit type {edit_type} not recognized. Use one of: replace, refine, reweight.") class AttentionControl(abc.ABC): def step_callback(self, x_t): return x_t def between_steps(self): return @property def num_uncond_att_layers(self): return 0 @abc.abstractmethod def forward(self, attn, is_cross: bool, place_in_unet: str): raise NotImplementedError def __call__(self, attn, is_cross: bool, place_in_unet: str): if self.cur_att_layer >= self.num_uncond_att_layers: h = attn.shape[0] attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet) self.cur_att_layer += 1 if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers: self.cur_att_layer = 0 self.cur_step += 1 self.between_steps() return attn def reset(self): self.cur_step = 0 self.cur_att_layer = 0 def __init__(self): self.cur_step = 0 self.num_att_layers = -1 self.cur_att_layer = 0 class EmptyControl(AttentionControl): def forward(self, attn, is_cross: bool, place_in_unet: str): return attn class AttentionStore(AttentionControl): @staticmethod def get_empty_store(): return { "down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": [], } def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" if attn.shape[1] <= 32**2: # avoid memory overhead self.step_store[key].append(attn) return attn def between_steps(self): if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] self.step_store = self.get_empty_store() def get_average_attention(self): average_attention = { key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store } return average_attention def reset(self): super(AttentionStore, self).reset() self.step_store = self.get_empty_store() self.attention_store = {} def __init__(self): super(AttentionStore, self).__init__() self.step_store = self.get_empty_store() self.attention_store = {} class LocalBlend: def __call__(self, x_t, attention_store): k = 1 maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3] maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words) for item in maps] maps = torch.cat(maps, dim=1) maps = (maps * self.alpha_layers).sum(-1).mean(1) mask = 
F.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k)) mask = F.interpolate(mask, size=(x_t.shape[2:])) mask = mask / mask.max(2, keepdims=True)[0].max(3, keepdims=True)[0] mask = mask.gt(self.threshold) mask = (mask[:1] + mask[1:]).float() x_t = x_t[:1] + mask * (x_t - x_t[:1]) return x_t def __init__( self, prompts: List[str], words: [List[List[str]]], tokenizer, device, threshold=0.3, max_num_words=77, ): self.max_num_words = 77 alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, self.max_num_words) for i, (prompt, words_) in enumerate(zip(prompts, words)): if isinstance(words_, str): words_ = [words_] for word in words_: ind = get_word_inds(prompt, word, tokenizer) alpha_layers[i, :, :, :, :, ind] = 1 self.alpha_layers = alpha_layers.to(device) self.threshold = threshold class AttentionControlEdit(AttentionStore, abc.ABC): def step_callback(self, x_t): if self.local_blend is not None: x_t = self.local_blend(x_t, self.attention_store) return x_t def replace_self_attention(self, attn_base, att_replace): if att_replace.shape[2] <= 16**2: return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape) else: return att_replace @abc.abstractmethod def replace_cross_attention(self, attn_base, att_replace): raise NotImplementedError def forward(self, attn, is_cross: bool, place_in_unet: str): super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet) # FIXME not replace correctly if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]): h = attn.shape[0] // (self.batch_size) attn = attn.reshape(self.batch_size, h, *attn.shape[1:]) attn_base, attn_repalce = attn[0], attn[1:] if is_cross: alpha_words = self.cross_replace_alpha[self.cur_step] attn_repalce_new = ( self.replace_cross_attention(attn_base, attn_repalce) * alpha_words + (1 - alpha_words) * attn_repalce ) attn[1:] = attn_repalce_new else: attn[1:] = self.replace_self_attention(attn_base, attn_repalce) attn = attn.reshape(self.batch_size * h, *attn.shape[2:]) return attn def __init__( self, prompts, num_steps: int, cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]], self_replace_steps: Union[float, Tuple[float, float]], local_blend: Optional[LocalBlend], tokenizer, device, ): super(AttentionControlEdit, self).__init__() # add tokenizer and device here self.tokenizer = tokenizer self.device = device self.batch_size = len(prompts) self.cross_replace_alpha = get_time_words_attention_alpha( prompts, num_steps, cross_replace_steps, self.tokenizer ).to(self.device) if isinstance(self_replace_steps, float): self_replace_steps = 0, self_replace_steps self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1]) self.local_blend = local_blend # ๅœจๅค–้ขๅฎšไน‰ๅŽไผ ่ฟ›ๆฅ class AttentionReplace(AttentionControlEdit): def replace_cross_attention(self, attn_base, att_replace): return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper) def __init__( self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, local_blend: Optional[LocalBlend] = None, tokenizer=None, device=None, ): super(AttentionReplace, self).__init__( prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device, ) self.mapper = get_replacement_mapper(prompts, self.tokenizer).to(self.device) class AttentionRefine(AttentionControlEdit): def replace_cross_attention(self, attn_base, att_replace): attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3) attn_replace = 
attn_base_replace * self.alphas + att_replace * (1 - self.alphas) return attn_replace def __init__( self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, local_blend: Optional[LocalBlend] = None, tokenizer=None, device=None, ): super(AttentionRefine, self).__init__( prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device, ) self.mapper, alphas = get_refinement_mapper(prompts, self.tokenizer) self.mapper, alphas = self.mapper.to(self.device), alphas.to(self.device) self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1]) class AttentionReweight(AttentionControlEdit): def replace_cross_attention(self, attn_base, att_replace): if self.prev_controller is not None: attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace) attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :] return attn_replace def __init__( self, prompts, num_steps: int, cross_replace_steps: float, self_replace_steps: float, equalizer, local_blend: Optional[LocalBlend] = None, controller: Optional[AttentionControlEdit] = None, tokenizer=None, device=None, ): super(AttentionReweight, self).__init__( prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device, ) self.equalizer = equalizer.to(self.device) self.prev_controller = controller ### util functions for all Edits def update_alpha_time_word( alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int, word_inds: Optional[torch.Tensor] = None, ): if isinstance(bounds, float): bounds = 0, bounds start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0]) if word_inds is None: word_inds = torch.arange(alpha.shape[2]) alpha[:start, prompt_ind, word_inds] = 0 alpha[start:end, prompt_ind, word_inds] = 1 alpha[end:, prompt_ind, word_inds] = 0 return alpha def get_time_words_attention_alpha( prompts, num_steps, cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]], tokenizer, max_num_words=77, ): if not isinstance(cross_replace_steps, dict): cross_replace_steps = {"default_": cross_replace_steps} if "default_" not in cross_replace_steps: cross_replace_steps["default_"] = (0.0, 1.0) alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words) for i in range(len(prompts) - 1): alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], i) for key, item in cross_replace_steps.items(): if key != "default_": inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))] for i, ind in enumerate(inds): if len(ind) > 0: alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind) alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words) return alpha_time_words ### util functions for LocalBlend and ReplacementEdit def get_word_inds(text: str, word_place: int, tokenizer): split_text = text.split(" ") if isinstance(word_place, str): word_place = [i for i, word in enumerate(split_text) if word_place == word] elif isinstance(word_place, int): word_place = [word_place] out = [] if len(word_place) > 0: words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1] cur_len, ptr = 0, 0 for i in range(len(words_encode)): cur_len += len(words_encode[i]) if ptr in word_place: out.append(i + 1) if cur_len >= len(split_text[ptr]): ptr += 1 cur_len = 0 return np.array(out) ### util functions for ReplacementEdit def get_replacement_mapper_(x: str, y: str, 
tokenizer, max_len=77): words_x = x.split(" ") words_y = y.split(" ") if len(words_x) != len(words_y): raise ValueError( f"attention replacement edit can only be applied on prompts with the same length" f" but prompt A has {len(words_x)} words and prompt B has {len(words_y)} words." ) inds_replace = [i for i in range(len(words_y)) if words_y[i] != words_x[i]] inds_source = [get_word_inds(x, i, tokenizer) for i in inds_replace] inds_target = [get_word_inds(y, i, tokenizer) for i in inds_replace] mapper = np.zeros((max_len, max_len)) i = j = 0 cur_inds = 0 while i < max_len and j < max_len: if cur_inds < len(inds_source) and inds_source[cur_inds][0] == i: inds_source_, inds_target_ = inds_source[cur_inds], inds_target[cur_inds] if len(inds_source_) == len(inds_target_): mapper[inds_source_, inds_target_] = 1 else: ratio = 1 / len(inds_target_) for i_t in inds_target_: mapper[inds_source_, i_t] = ratio cur_inds += 1 i += len(inds_source_) j += len(inds_target_) elif cur_inds < len(inds_source): mapper[i, j] = 1 i += 1 j += 1 else: mapper[j, j] = 1 i += 1 j += 1 return torch.from_numpy(mapper).float() def get_replacement_mapper(prompts, tokenizer, max_len=77): x_seq = prompts[0] mappers = [] for i in range(1, len(prompts)): mapper = get_replacement_mapper_(x_seq, prompts[i], tokenizer, max_len) mappers.append(mapper) return torch.stack(mappers) ### util functions for ReweightEdit def get_equalizer( text: str, word_select: Union[int, Tuple[int, ...]], values: Union[List[float], Tuple[float, ...]], tokenizer, ): if isinstance(word_select, (int, str)): word_select = (word_select,) equalizer = torch.ones(len(values), 77) values = torch.tensor(values, dtype=torch.float32) for word in word_select: inds = get_word_inds(text, word, tokenizer) equalizer[:, inds] = values return equalizer ### util functions for RefinementEdit class ScoreParams: def __init__(self, gap, match, mismatch): self.gap = gap self.match = match self.mismatch = mismatch def mis_match_char(self, x, y): if x != y: return self.mismatch else: return self.match def get_matrix(size_x, size_y, gap): matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32) matrix[0, 1:] = (np.arange(size_y) + 1) * gap matrix[1:, 0] = (np.arange(size_x) + 1) * gap return matrix def get_traceback_matrix(size_x, size_y): matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32) matrix[0, 1:] = 1 matrix[1:, 0] = 2 matrix[0, 0] = 4 return matrix def global_align(x, y, score): matrix = get_matrix(len(x), len(y), score.gap) trace_back = get_traceback_matrix(len(x), len(y)) for i in range(1, len(x) + 1): for j in range(1, len(y) + 1): left = matrix[i, j - 1] + score.gap up = matrix[i - 1, j] + score.gap diag = matrix[i - 1, j - 1] + score.mis_match_char(x[i - 1], y[j - 1]) matrix[i, j] = max(left, up, diag) if matrix[i, j] == left: trace_back[i, j] = 1 elif matrix[i, j] == up: trace_back[i, j] = 2 else: trace_back[i, j] = 3 return matrix, trace_back def get_aligned_sequences(x, y, trace_back): x_seq = [] y_seq = [] i = len(x) j = len(y) mapper_y_to_x = [] while i > 0 or j > 0: if trace_back[i, j] == 3: x_seq.append(x[i - 1]) y_seq.append(y[j - 1]) i = i - 1 j = j - 1 mapper_y_to_x.append((j, i)) elif trace_back[i][j] == 1: x_seq.append("-") y_seq.append(y[j - 1]) j = j - 1 mapper_y_to_x.append((j, -1)) elif trace_back[i][j] == 2: x_seq.append(x[i - 1]) y_seq.append("-") i = i - 1 elif trace_back[i][j] == 4: break mapper_y_to_x.reverse() return x_seq, y_seq, torch.tensor(mapper_y_to_x, dtype=torch.int64) def get_mapper(x: str, y: str, tokenizer, max_len=77): 
x_seq = tokenizer.encode(x) y_seq = tokenizer.encode(y) score = ScoreParams(0, 1, -1) matrix, trace_back = global_align(x_seq, y_seq, score) mapper_base = get_aligned_sequences(x_seq, y_seq, trace_back)[-1] alphas = torch.ones(max_len) alphas[: mapper_base.shape[0]] = mapper_base[:, 1].ne(-1).float() mapper = torch.zeros(max_len, dtype=torch.int64) mapper[: mapper_base.shape[0]] = mapper_base[:, 1] mapper[mapper_base.shape[0] :] = len(y_seq) + torch.arange(max_len - len(y_seq)) return mapper, alphas def get_refinement_mapper(prompts, tokenizer, max_len=77): x_seq = prompts[0] mappers, alphas = [], [] for i in range(1, len(prompts)): mapper, alpha = get_mapper(x_seq, prompts[i], tokenizer, max_len) mappers.append(mapper) alphas.append(alpha) return torch.stack(mappers), torch.stack(alphas)
Source file: `diffusers/examples/community/pipeline_prompt2prompt.py`
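The Prompt-to-Prompt pipeline above is driven entirely through `cross_attention_kwargs`: `create_controller` builds an `AttentionReplace`, `AttentionRefine` or `AttentionReweight` controller from `edit_type`, and `register_attention_control` swaps every UNet attention processor for `P2PCrossAttnProcessor` so the controller can intercept the attention maps. The sketch below only illustrates how that entry point might be called; the checkpoint id and the `custom_pipeline` name are assumptions, not taken from the file above.

```python
# Hedged sketch: invoking the Prompt-to-Prompt community pipeline defined above.
# The checkpoint id and `custom_pipeline` name are assumptions -- adjust to your setup.
import torch

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",           # assumed base checkpoint
    custom_pipeline="pipeline_prompt2prompt",  # assumed community-pipeline name
    torch_dtype=torch.float16,
).to("cuda")

# For `edit_type="replace"` both prompts must contain the same number of words
# (see `get_replacement_mapper_` above).
prompts = [
    "a photo of a cat riding a bicycle",
    "a photo of a dog riding a bicycle",
]

images = pipe(
    prompts,
    num_inference_steps=50,
    cross_attention_kwargs={
        "edit_type": "replace",  # one of: replace, refine, reweight
        "n_cross_replace": 0.4,
        "n_self_replace": 0.4,
    },
).images
images[1].save("edited.png")
```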
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpeechToImagePipeline(DiffusionPipeline, StableDiffusionMixin): def __init__( self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) @torch.no_grad() def __call__( self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ): inputs = self.speech_processor.feature_extractor( audio, return_tensors="pt", sampling_rate=sampling_rate ).input_features.to(self.device) predicted_ids = self.speech_model.generate(inputs, max_length=480_000) prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[ 0 ] if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return image return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
Source file: `diffusers/examples/community/speech_to_image_diffusion.py`
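`SpeechToImagePipeline` above first runs Whisper on the raw waveform, decodes the transcription, and then feeds that text through the usual Stable Diffusion text-to-image loop. A minimal sketch of wiring it together is shown below; the model ids, the `custom_pipeline` name, and the dummy audio dataset are assumptions used purely for illustration.

```python
# Hedged sketch: assembling the SpeechToImagePipeline defined above.
# Model ids, the `custom_pipeline` name, and the audio source are assumptions.
import torch
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor

from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Whisper transcribes the audio; the transcription becomes the diffusion prompt.
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",              # assumed base checkpoint
    custom_pipeline="speech_to_image_diffusion",  # assumed community-pipeline name
    speech_model=speech_model,
    speech_processor=speech_processor,
).to(device)

# Any mono waveform works; this tiny ASR test split is used only for illustration.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]

image = pipe(sample["array"], sampling_rate=sample["sampling_rate"]).images[0]
image.save("speech_to_image.png")
```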
# Copyright 2024 Peter Willemsen <[email protected]>. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def make_transparency_mask(size, overlap_pixels, remove_borders=[]): size_x = size[0] - overlap_pixels * 2 size_y = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels mask = np.ones((size_y, size_x), dtype=np.uint8) * 255 mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0) if "l" in remove_borders: mask = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: mask = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: mask = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: mask = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def clamp(n, smallest, largest): return max(smallest, min(n, largest)) def clamp_rect(rect: [int], min: [int], max: [int]): return ( clamp(rect[0], min[0], max[0]), clamp(rect[1], min[1], max[1]), clamp(rect[2], min[0], max[0]), clamp(rect[3], min[1], max[1]), ) def add_overlap_rect(rect: [int], overlap: int, image_size: [int]): rect = list(rect) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]]) return rect def squeeze_tile(tile, original_image, original_slice, slice_x): result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1])) result.paste( original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ), (0, 0), ) result.paste(tile, (original_slice, 0)) return result def unsqueeze_tile(tile, original_image_slice): crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) tile = tile.crop(crop_rect) return tile def next_divisible(n, d): divisor = n % d return n - divisor class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline): r""" Pipeline for tile-based text-guided image super-resolution using Stable Diffusion 2, trading memory for compute to create gigantic images. This model inherits from [`StableDiffusionUpscalePipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. low_res_scheduler ([`SchedulerMixin`]): A scheduler used to add initial noise to the low res conditioning image. It must be an instance of [`DDPMScheduler`]. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350, ): super().__init__( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, ) def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs): torch.manual_seed(0) crop_rect = ( min(image.size[0] - (tile_size + original_image_slice), x * tile_size), min(image.size[1] - (tile_size + original_image_slice), y * tile_size), min(image.size[0], (x + 1) * tile_size), min(image.size[1], (y + 1) * tile_size), ) crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size) tile = image.crop(crop_rect_with_overlap) translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] translated_slice_x = translated_slice_x - (original_image_slice / 2) translated_slice_x = max(0, translated_slice_x) to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x) orig_input_size = to_input.size to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC) upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0] upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC) upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice) upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC) remove_borders = [] if x == 0: remove_borders.append("l") elif crop_rect[2] == image.size[0]: remove_borders.append("r") if y == 0: remove_borders.append("t") elif crop_rect[3] == image.size[1]: remove_borders.append("b") transparency_mask = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders ), mode="L", ) final_image.paste( upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = 
None, callback_steps: int = 1, tile_size: int = 128, tile_border: int = 32, original_image_slice: int = 32, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.FloatTensor`): `Image`, or tensor representing an image batch which will be upscaled. * num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. tile_size (`int`, *optional*): The size of the tiles. Too big can result in an OOM-error. tile_border (`int`, *optional*): The number of pixels around a tile to consider (bigger means less seams, too big can lead to an OOM-error). original_image_slice (`int`, *optional*): The amount of pixels of the original image to calculate with the current tile (bigger means more depth is preserved, less blur occurs in the final image, too big can lead to an OOM-error or loss in detail). callback (`Callable`, *optional*): A function that take a callback function with a single argument, a dict, that contains the (partially) processed image under "image", as well as the progress (0 to 1, where 1 is completed) under "progress". Returns: A PIL.Image that is 4 times larger than the original input image. 
""" final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4)) tcx = math.ceil(image.size[0] / tile_size) tcy = math.ceil(image.size[1] / tile_size) total_tile_count = tcx * tcy current_count = 0 for y in range(tcy): for x in range(tcx): self._process_tile( original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ) current_count += 1 if callback is not None: callback({"progress": current_count / total_tile_count, "image": final_image}) return final_image def main(): # Run a demo model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16) pipe = pipe.to("cuda") image = Image.open("../../docs/source/imgs/diffusers_library.jpg") def callback(obj): print(f"progress: {obj['progress']:.4f}") obj["image"].save("diffusers_library_progress.jpg") final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback) final_image.save("diffusers_library.jpg") if __name__ == "__main__": main()
Source file: `diffusers/examples/community/tiled_upscaling.py`
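The `main()` demo above instantiates `StableDiffusionTiledUpscalePipeline` directly from the x4 upscaler checkpoint. When the file is consumed as a community pipeline instead, the same class can be loaded through `custom_pipeline`; the sketch below assumes that name and checkpoint, and only illustrates the tiling knobs (`tile_size` trades VRAM for the number of tiles, `tile_border` trades extra compute for fewer visible seams).

```python
# Hedged sketch: loading the tiled upscaler above as a community pipeline.
# The `custom_pipeline` name and checkpoint id are assumptions.
import torch
from PIL import Image

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler",
    custom_pipeline="tiled_upscaling",  # assumed name of the community pipeline file
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("input.jpg")

# `__call__` above returns a plain PIL image four times larger than the input.
upscaled = pipe(
    image=image,
    prompt="high quality, sharp details",
    noise_level=40,
    tile_size=128,   # larger tiles need more VRAM
    tile_border=32,  # larger borders reduce seams at extra compute cost
)
upscaled.save("output_x4.jpg")
```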
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 Harutatsu Akiyama and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import shutil import warnings from pathlib import Path from urllib.parse import urlparse import accelerate import datasets import numpy as np import PIL import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_instruct_pix2pix import ( StableDiffusionXLInstructPix2PixPipeline, ) from diffusers.training_utils import EMAModel from diffusers.utils import check_min_version, deprecate, is_wandb_available, load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "fusing/instructpix2pix-1000-samples": ("file_name", "edited_image", "edit_prompt"), } WANDB_TABLE_COL_NAMES = ["file_name", "edited_image", "edit_prompt"] TORCH_DTYPE_MAPPING = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16} def log_validation( pipeline, args, accelerator, generator, global_step, is_final_validation=False, ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) val_save_dir = os.path.join(args.output_dir, "validation_images") if not os.path.exists(val_save_dir): os.makedirs(val_save_dir) original_image = ( lambda image_url_or_path: load_image(image_url_or_path) if urlparse(image_url_or_path).scheme else Image.open(image_url_or_path).convert("RGB") )(args.val_image_url_or_path) with torch.autocast(str(accelerator.device).replace(":0", ""), enabled=accelerator.mixed_precision == "fp16"): edited_images = [] # Run inference for val_img_idx in range(args.num_validation_images): a_val_img = pipeline( args.validation_prompt, image=original_image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7, generator=generator, ).images[0] edited_images.append(a_val_img) # Save validation images a_val_img.save(os.path.join(val_save_dir, f"step_{global_step}_val_img_{val_img_idx}.png")) for tracker in accelerator.trackers: if tracker.name == "wandb": wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES) for edited_image in edited_images: wandb_table.add_data(wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt) logger_name = "test" if is_final_validation else "validation" tracker.log({logger_name: wandb_table}) def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def parse_args(): parser = argparse.ArgumentParser(description="Script to train Stable Diffusion XL for InstructPix2Pix.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--vae_precision", type=str, choices=["fp32", "fp16", "bf16"], default="fp32", help=( "The vanilla SDXL 1.0 VAE can cause NaNs due to large activation values. Some custom models might already have a solution" " to this problem, and this flag allows you to use mixed precision to stabilize training." ), ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that ๐Ÿค— Datasets can understand." 
), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--original_image_column", type=str, default="input_image", help="The column of the dataset containing the original image on which edits where made.", ) parser.add_argument( "--edited_image_column", type=str, default="edited_image", help="The column of the dataset containing the edited image.", ) parser.add_argument( "--edit_prompt_column", type=str, default="edit_prompt", help="The column of the dataset containing the edit instruction.", ) parser.add_argument( "--val_image_url_or_path", type=str, default=None, help="URL to the original image that you would like to edit (used during inference for debugging purposes).", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference." ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run fine-tuning validation every X steps. The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--output_dir", type=str, default="instruct-pix2pix-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=256, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this resolution." ), ) parser.add_argument( "--crops_coords_top_left_h", type=int, default=0, help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), ) parser.add_argument( "--crops_coords_top_left_w", type=int, default=0, help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--conditioning_dropout_prob", type=float, default=None, help="Conditioning dropout probability. Drops out the conditionings (image and edit prompt) used in training InstructPix2Pix. See section 3.2.1 in the paper: https://arxiv.org/abs/2211.09800.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def convert_to_np(image, resolution): if isinstance(image, str): image = PIL.Image.open(image) image = image.convert("RGB").resize((resolution, resolution)) return np.array(image).transpose(2, 0, 1) def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # InstructPix2Pix uses an additional image for conditioning. To accommodate that, # it uses 8 channels (instead of 4) in the first (conv) layer of the UNet. This UNet is # then fine-tuned on the custom InstructPix2Pix dataset. This modified UNet is initialized # from the pre-trained checkpoints. For the extra channels added to the first layer, they are # initialized to zero. logger.info("Initializing the XL InstructPix2Pix UNet from the pretrained UNet.") in_channels = 8 out_channels = unet.conv_in.out_channels unet.register_to_config(in_channels=in_channels) with torch.no_grad(): new_conv_in = nn.Conv2d( in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding ) new_conv_in.weight.zero_() new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight) unet.conv_in = new_conv_in # Create EMA for the unet. if args.use_ema: ema_unet = EMAModel(unet.parameters(), model_cls=UNet2DConditionModel, model_config=unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/main/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.original_image_column is None: original_image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: original_image_column = args.original_image_column if original_image_column not in column_names: raise ValueError( f"--original_image_column' value '{args.original_image_column}' needs to be one of: {', '.join(column_names)}" ) if args.edit_prompt_column is None: edit_prompt_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: edit_prompt_column = args.edit_prompt_column if edit_prompt_column not in column_names: raise ValueError( f"--edit_prompt_column' value '{args.edit_prompt_column}' needs to be one of: {', '.join(column_names)}" ) if args.edited_image_column is None: edited_image_column = dataset_columns[2] if dataset_columns is not None else column_names[2] else: edited_image_column = args.edited_image_column if edited_image_column not in column_names: raise ValueError( f"--edited_image_column' value '{args.edited_image_column}' needs to be one of: {', '.join(column_names)}" ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 warnings.warn(f"weight_dtype {weight_dtype} may cause nan during vae encoding", UserWarning) elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 warnings.warn(f"weight_dtype {weight_dtype} may cause nan during vae encoding", UserWarning) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(captions, tokenizer): inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt", ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), ] ) def preprocess_images(examples): original_images = np.concatenate( [convert_to_np(image, args.resolution) for image in examples[original_image_column]] ) edited_images = np.concatenate( [convert_to_np(image, args.resolution) for image in examples[edited_image_column]] ) # We need to ensure that the original and the edited images undergo the same # augmentation transforms. images = np.concatenate([original_images, edited_images]) images = torch.tensor(images) images = 2 * (images / 255) - 1 return train_transforms(images) # Load scheduler, tokenizer and models. 
tokenizer_1 = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) tokenizer_2 = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False, ) text_encoder_cls_1 = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) text_encoder_cls_2 = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder_1 = text_encoder_cls_1.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder_2 = text_encoder_cls_2.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant ) # We ALWAYS pre-compute the additional condition embeddings needed for SDXL # UNet as the model is already big and it uses two text encoders. text_encoder_1.to(accelerator.device, dtype=weight_dtype) text_encoder_2.to(accelerator.device, dtype=weight_dtype) tokenizers = [tokenizer_1, tokenizer_2] text_encoders = [text_encoder_1, text_encoder_2] # Freeze vae and text_encoders vae.requires_grad_(False) text_encoder_1.requires_grad_(False) text_encoder_2.requires_grad_(False) # Set UNet to trainable. unet.train() # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(text_encoders, tokenizers, prompt): prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return prompt_embeds, pooled_prompt_embeds # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompts(text_encoders, tokenizers, prompts): prompt_embeds_all = [] pooled_prompt_embeds_all = [] for prompt in prompts: prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt) prompt_embeds_all.append(prompt_embeds) pooled_prompt_embeds_all.append(pooled_prompt_embeds) return torch.stack(prompt_embeds_all), torch.stack(pooled_prompt_embeds_all) # Adapted from examples.dreambooth.train_dreambooth_lora_sdxl # Here, we compute not just the text embeddings but also the additional embeddings # needed 
for the SD XL UNet to operate. def compute_embeddings_for_prompts(prompts, text_encoders, tokenizers): with torch.no_grad(): prompt_embeds_all, pooled_prompt_embeds_all = encode_prompts(text_encoders, tokenizers, prompts) add_text_embeds_all = pooled_prompt_embeds_all prompt_embeds_all = prompt_embeds_all.to(accelerator.device) add_text_embeds_all = add_text_embeds_all.to(accelerator.device) return prompt_embeds_all, add_text_embeds_all # Get null conditioning def compute_null_conditioning(): null_conditioning_list = [] for a_tokenizer, a_text_encoder in zip(tokenizers, text_encoders): null_conditioning_list.append( a_text_encoder( tokenize_captions([""], tokenizer=a_tokenizer).to(accelerator.device), output_hidden_states=True, ).hidden_states[-2] ) return torch.concat(null_conditioning_list, dim=-1) null_conditioning = compute_null_conditioning() def compute_time_ids(): crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w) original_size = target_size = (args.resolution, args.resolution) add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = torch.tensor([add_time_ids], dtype=weight_dtype) return add_time_ids.to(accelerator.device).repeat(args.train_batch_size, 1) add_time_ids = compute_time_ids() def preprocess_train(examples): # Preprocess images. preprocessed_images = preprocess_images(examples) # Since the original and edited images were concatenated before # applying the transformations, we need to separate them and reshape # them accordingly. original_images, edited_images = preprocessed_images.chunk(2) original_images = original_images.reshape(-1, 3, args.resolution, args.resolution) edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution) # Collate the preprocessed images into the `examples`. examples["original_pixel_values"] = original_images examples["edited_pixel_values"] = edited_images # Preprocess the captions. captions = list(examples[edit_prompt_column]) prompt_embeds_all, add_text_embeds_all = compute_embeddings_for_prompts(captions, text_encoders, tokenizers) examples["prompt_embeds"] = prompt_embeds_all examples["add_text_embeds"] = add_text_embeds_all return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): original_pixel_values = torch.stack([example["original_pixel_values"] for example in examples]) original_pixel_values = original_pixel_values.to(memory_format=torch.contiguous_format).float() edited_pixel_values = torch.stack([example["edited_pixel_values"] for example in examples]) edited_pixel_values = edited_pixel_values.to(memory_format=torch.contiguous_format).float() prompt_embeds = torch.concat([example["prompt_embeds"] for example in examples], dim=0) add_text_embeds = torch.concat([example["add_text_embeds"] for example in examples], dim=0) return { "original_pixel_values": original_pixel_values, "edited_pixel_values": edited_pixel_values, "prompt_embeds": prompt_embeds, "add_text_embeds": add_text_embeds, } # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. 
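    # If --max_train_steps is not passed, it is derived as
    # num_train_epochs * ceil(len(train_dataloader) / gradient_accumulation_steps).
    # The LR scheduler below is stepped once per micro-batch, which is why its warmup
    # and total step counts are multiplied by gradient_accumulation_steps.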
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # Move vae, unet and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. if args.pretrained_vae_model_name_or_path is not None: vae.to(accelerator.device, dtype=weight_dtype) else: vae.to(accelerator.device, dtype=TORCH_DTYPE_MAPPING[args.vae_precision]) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("instruct-pix2pix-xl", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # We want to learn the denoising process w.r.t the edited images which # are conditioned on the original image (which was edited) and the edit instruction. # So, first, convert images to latent space. if args.pretrained_vae_model_name_or_path is not None: edited_pixel_values = batch["edited_pixel_values"].to(dtype=weight_dtype) else: edited_pixel_values = batch["edited_pixel_values"] latents = vae.encode(edited_pixel_values).latent_dist.sample() latents = latents * vae.config.scaling_factor if args.pretrained_vae_model_name_or_path is None: latents = latents.to(weight_dtype) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # SDXL additional inputs encoder_hidden_states = batch["prompt_embeds"] add_text_embeds = batch["add_text_embeds"] # Get the additional image embedding for conditioning. # Instead of getting a diagonal Gaussian here, we simply take the mode. if args.pretrained_vae_model_name_or_path is not None: original_pixel_values = batch["original_pixel_values"].to(dtype=weight_dtype) else: original_pixel_values = batch["original_pixel_values"] original_image_embeds = vae.encode(original_pixel_values).latent_dist.sample() if args.pretrained_vae_model_name_or_path is None: original_image_embeds = original_image_embeds.to(weight_dtype) # Conditioning dropout to support classifier-free guidance during inference. For more details # check out the section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800. if args.conditioning_dropout_prob is not None: random_p = torch.rand(bsz, device=latents.device, generator=generator) # Sample masks for the edit prompts. prompt_mask = random_p < 2 * args.conditioning_dropout_prob prompt_mask = prompt_mask.reshape(bsz, 1, 1) # Final text conditioning. encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states) # Sample masks for the original images. image_mask_dtype = original_image_embeds.dtype image_mask = 1 - ( (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype) * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype) ) image_mask = image_mask.reshape(bsz, 1, 1, 1) # Final image conditioning. original_image_embeds = image_mask * original_image_embeds # Concatenate the `original_image_embeds` with the `noisy_latents`. 
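                # The conditioning latents are concatenated channel-wise with the noisy target
                # latents, so the InstructPix2Pix UNet takes 8 input channels
                # (4 noisy edited-image latents + 4 original-image latents) instead of the usual 4.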
concatenated_noisy_latents = torch.cat([noisy_latents, original_image_embeds], dim=1) # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} model_pred = unet( concatenated_noisy_latents, timesteps, encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) ### BEGIN: Perform validation every `validation_epochs` steps if global_step % args.validation_steps == 0: if (args.val_image_url_or_path is not None) and (args.validation_prompt is not None): # create pipeline if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) # The models need unwrapping because for compatibility in distributed training mode. 
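                    # The unwrapped UNet is assembled with the frozen text encoders, tokenizers and
                    # VAE into a temporary validation pipeline; the pipeline is deleted again afterwards
                    # (together with an empty_cache call) to free GPU memory.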
pipeline = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unwrap_model(unet), text_encoder=text_encoder_1, text_encoder_2=text_encoder_2, tokenizer=tokenizer_1, tokenizer_2=tokenizer_2, vae=vae, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) log_validation( pipeline, args, accelerator, generator, global_step, is_final_validation=False, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) del pipeline torch.cuda.empty_cache() ### END: Perform validation every `validation_epochs` steps if global_step >= args.max_train_steps: break # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder_1, text_encoder_2=text_encoder_2, tokenizer=tokenizer_1, tokenizer_2=tokenizer_2, vae=vae, unet=unwrap_model(unet), revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if (args.val_image_url_or_path is not None) and (args.validation_prompt is not None): log_validation( pipeline, args, accelerator, generator, global_step, is_final_validation=True, ) accelerator.end_training() if __name__ == "__main__": main()
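# Illustrative launch command (all argument values below are placeholders, adjust to your setup):
#   accelerate launch train_instruct_pix2pix_sdxl.py \
#     --pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
#     --dataset_name=<your-instruct-pix2pix-dataset> \
#     --resolution=768 --train_batch_size=4 --gradient_accumulation_steps=4 \
#     --learning_rate=5e-5 --max_train_steps=15000 --checkpointing_steps=5000 \
#     --mixed_precision=fp16 --output_dir="instruct-pix2pix-sdxl"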
# --- End of file: diffusers/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py ---
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """Script to train a consistency model from scratch via (improved) consistency training.""" import argparse import gc import logging import math import os import shutil from datetime import timedelta from pathlib import Path import accelerate import datasets import numpy as np import torch from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm import diffusers from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, resolve_interpolation_mode from diffusers.utils import is_tensorboard_available, is_wandb_available from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb logger = get_logger(__name__, log_level="INFO") def _extract_into_tensor(arr, timesteps, broadcast_shape): """ Extract values from a 1-D numpy array for a batch of indices. :param arr: the 1-D numpy array. :param timesteps: a tensor of indices into the array to extract. :param broadcast_shape: a larger shape of K dimensions with the batch dimension equal to the length of timesteps. :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. """ if not isinstance(arr, torch.Tensor): arr = torch.from_numpy(arr) res = arr[timesteps].float().to(timesteps.device) while len(res.shape) < len(broadcast_shape): res = res[..., None] return res.expand(broadcast_shape) def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) def get_discretization_steps(global_step: int, max_train_steps: int, s_0: int = 10, s_1: int = 1280, constant=False): """ Calculates the current discretization steps at global step k using the discretization curriculum N(k). """ if constant: return s_0 + 1 k_prime = math.floor(max_train_steps / (math.log2(math.floor(s_1 / s_0)) + 1)) num_discretization_steps = min(s_0 * 2 ** math.floor(global_step / k_prime), s_1) + 1 return num_discretization_steps def get_skip_steps(global_step, initial_skip: int = 1): # Currently only support constant skip curriculum. 
return initial_skip def get_karras_sigmas( num_discretization_steps: int, sigma_min: float = 0.002, sigma_max: float = 80.0, rho: float = 7.0, dtype=torch.float32, ): """ Calculates the Karras sigmas timestep discretization of [sigma_min, sigma_max]. """ ramp = np.linspace(0, 1, num_discretization_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho # Make sure sigmas are in increasing rather than decreasing order (see section 2 of the iCT paper) sigmas = sigmas[::-1].copy() sigmas = torch.from_numpy(sigmas).to(dtype=dtype) return sigmas def get_discretized_lognormal_weights(noise_levels: torch.FloatTensor, p_mean: float = -1.1, p_std: float = 2.0): """ Calculates the unnormalized weights for a 1D array of noise level sigma_i based on the discretized lognormal" " distribution used in the iCT paper (given in Equation 10). """ upper_prob = torch.special.erf((torch.log(noise_levels[1:]) - p_mean) / (math.sqrt(2) * p_std)) lower_prob = torch.special.erf((torch.log(noise_levels[:-1]) - p_mean) / (math.sqrt(2) * p_std)) weights = upper_prob - lower_prob return weights def get_loss_weighting_schedule(noise_levels: torch.FloatTensor): """ Calculates the loss weighting schedule lambda given a set of noise levels. """ return 1.0 / (noise_levels[1:] - noise_levels[:-1]) def add_noise(original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor): # Make sure timesteps (Karras sigmas) have the same device and dtype as original_samples sigmas = timesteps.to(device=original_samples.device, dtype=original_samples.dtype) while len(sigmas.shape) < len(original_samples.shape): sigmas = sigmas.unsqueeze(-1) noisy_samples = original_samples + noise * sigmas return noisy_samples def get_noise_preconditioning(sigmas, noise_precond_type: str = "cm"): """ Calculates the noise preconditioning function c_noise, which is used to transform the raw Karras sigmas into the timestep input for the U-Net. """ if noise_precond_type == "none": return sigmas elif noise_precond_type == "edm": return 0.25 * torch.log(sigmas) elif noise_precond_type == "cm": return 1000 * 0.25 * torch.log(sigmas + 1e-44) else: raise ValueError( f"Noise preconditioning type {noise_precond_type} is not current supported. Currently supported noise" f" preconditioning types are `none` (which uses the sigmas as is), `edm`, and `cm`." ) def get_input_preconditioning(sigmas, sigma_data=0.5, input_precond_type: str = "cm"): """ Calculates the input preconditioning factor c_in, which is used to scale the U-Net image input. """ if input_precond_type == "none": return 1 elif input_precond_type == "cm": return 1.0 / (sigmas**2 + sigma_data**2) else: raise ValueError( f"Input preconditioning type {input_precond_type} is not current supported. Currently supported input" f" preconditioning types are `none` (which uses a scaling factor of 1.0) and `cm`." ) def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=1.0): scaled_timestep = timestep_scaling * timestep c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2) c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5 return c_skip, c_out def log_validation(unet, scheduler, args, accelerator, weight_dtype, step, name="teacher"): logger.info("Running validation... 
") unet = accelerator.unwrap_model(unet) pipeline = ConsistencyModelPipeline( unet=unet, scheduler=scheduler, ) pipeline = pipeline.to(device=accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) class_labels = [None] if args.class_conditional: if args.num_classes is not None: class_labels = list(range(args.num_classes)) else: logger.warning( "The model is class-conditional but the number of classes is not set. The generated images will be" " unconditional rather than class-conditional." ) image_logs = [] for class_label in class_labels: images = [] with torch.autocast("cuda"): images = pipeline( num_inference_steps=1, batch_size=args.eval_batch_size, class_labels=[class_label] * args.eval_batch_size, generator=generator, ).images log = {"images": images} if args.class_conditional and class_label is not None: log["class_label"] = str(class_label) else: log["class_label"] = "images" image_logs.append(log) for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] class_label = log["class_label"] formatted_images = [] for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(class_label, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] class_label = log["class_label"] for image in images: image = wandb.Image(image, caption=class_label) formatted_images.append(image) tracker.log({f"validation/{name}": formatted_images}) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() torch.cuda.empty_cache() return image_logs def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") # ------------Model Arguments----------- parser.add_argument( "--model_config_name_or_path", type=str, default=None, help="The config of the UNet model to train, leave as None to use standard DDPM configuration.", ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, help=( "If initializing the weights from a pretrained model, the path to the pretrained model or model identifier" " from huggingface.co/models." ), ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help=( "Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. `fp16`," " `non_ema`, etc.", ), ) # ------------Dataset Arguments----------- parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). 
It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that HF Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--dataset_image_column_name", type=str, default="image", help="The name of the image column in the dataset to use for training.", ) parser.add_argument( "--dataset_class_label_column_name", type=str, default="label", help="If doing class-conditional training, the name of the class label column in the dataset to use.", ) # ------------Image Processing Arguments----------- parser.add_argument( "--resolution", type=int, default=64, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--interpolation_type", type=str, default="bilinear", help=( "The interpolation function used when resizing images to the desired resolution. Choose between `bilinear`," " `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", default=False, action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--class_conditional", action="store_true", help=( "Whether to train a class-conditional model. If set, the class labels will be taken from the `label`" " column of the provided dataset." ), ) parser.add_argument( "--num_classes", type=int, default=None, help="The number of classes in the training data, if training a class-conditional model.", ) parser.add_argument( "--class_embed_type", type=str, default=None, help=( "The class embedding type to use. Choose from `None`, `identity`, and `timestep`. If `class_conditional`" " and `num_classes` and set, but `class_embed_type` is `None`, a embedding matrix will be used." ), ) # ------------Dataloader Arguments----------- parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "The number of subprocesses to use for data loading. 0 means that the data will be loaded in the main" " process." ), ) # ------------Training Arguments----------- # ----General Training Arguments---- parser.add_argument( "--output_dir", type=str, default="ddpm-model-64", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--overwrite_output_dir", action="store_true") parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") # ----Batch Size and Training Length---- parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) # ----Learning Rate---- parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="cosine", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) # ----Optimizer (Adam) Arguments---- parser.add_argument( "--optimizer_type", type=str, default="adamw", help=( "The optimizer algorithm to use for training. Choose between `radam` and `adamw`. The iCT paper uses" " RAdam." ), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.95, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument( "--adam_weight_decay", type=float, default=1e-6, help="Weight decay magnitude for the Adam optimizer." ) parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") # ----Consistency Training (CT) Specific Arguments---- parser.add_argument( "--prediction_type", type=str, default="sample", choices=["sample"], help="Whether the model should predict the 'epsilon'/noise error or directly the reconstructed image 'x0'.", ) parser.add_argument("--ddpm_num_steps", type=int, default=1000) parser.add_argument("--ddpm_num_inference_steps", type=int, default=1000) parser.add_argument("--ddpm_beta_schedule", type=str, default="linear") parser.add_argument( "--sigma_min", type=float, default=0.002, help=( "The lower boundary for the timestep discretization, which should be set to a small positive value close" " to zero to avoid numerical issues when solving the PF-ODE backwards in time." ), ) parser.add_argument( "--sigma_max", type=float, default=80.0, help=( "The upper boundary for the timestep discretization, which also determines the variance of the Gaussian" " prior." ), ) parser.add_argument( "--rho", type=float, default=7.0, help="The rho parameter for the Karras sigmas timestep dicretization.", ) parser.add_argument( "--huber_c", type=float, default=None, help=( "The Pseudo-Huber loss parameter c. If not set, this will default to the value recommended in the Improved" " Consistency Training (iCT) paper of 0.00054 * sqrt(d), where d is the data dimensionality." ), ) parser.add_argument( "--discretization_s_0", type=int, default=10, help=( "The s_0 parameter in the discretization curriculum N(k). This controls the number of training steps after" " which the number of discretization steps N will be doubled." ), ) parser.add_argument( "--discretization_s_1", type=int, default=1280, help=( "The s_1 parameter in the discretization curriculum N(k). 
This controls the upper limit to the number of" " discretization steps used. Increasing this value will reduce the bias at the cost of higher variance." ), ) parser.add_argument( "--constant_discretization_steps", action="store_true", help=( "Whether to set the discretization curriculum N(k) to be the constant value `discretization_s_0 + 1`. This" " is useful for testing when `max_number_steps` is small, when `k_prime` would otherwise be 0, causing" " a divide-by-zero error." ), ) parser.add_argument( "--p_mean", type=float, default=-1.1, help=( "The mean parameter P_mean for the (discretized) lognormal noise schedule, which controls the probability" " of sampling a (discrete) noise level sigma_i." ), ) parser.add_argument( "--p_std", type=float, default=2.0, help=( "The standard deviation parameter P_std for the (discretized) noise schedule, which controls the" " probability of sampling a (discrete) noise level sigma_i." ), ) parser.add_argument( "--noise_precond_type", type=str, default="cm", help=( "The noise preconditioning function to use for transforming the raw Karras sigmas into the timestep" " argument of the U-Net. Choose between `none` (the identity function), `edm`, and `cm`." ), ) parser.add_argument( "--input_precond_type", type=str, default="cm", help=( "The input preconditioning function to use for scaling the image input of the U-Net. Choose between `none`" " (a scaling factor of 1) and `cm`." ), ) parser.add_argument( "--skip_steps", type=int, default=1, help=( "The gap in indices between the student and teacher noise levels. In the iCT paper this is always set to" " 1, but theoretically this could be greater than 1 and/or altered according to a curriculum throughout" " training, much like the number of discretization steps is." ), ) parser.add_argument( "--cast_teacher", action="store_true", help="Whether to cast the teacher U-Net model to `weight_dtype` or leave it in full precision.", ) # ----Exponential Moving Average (EMA) Arguments---- parser.add_argument( "--use_ema", action="store_true", help="Whether to use Exponential Moving Average for the final model weights.", ) parser.add_argument( "--ema_min_decay", type=float, default=None, help=( "The minimum decay magnitude for EMA. If not set, this will default to the value of `ema_max_decay`," " resulting in a constant EMA decay rate." ), ) parser.add_argument( "--ema_max_decay", type=float, default=0.99993, help=( "The maximum decay magnitude for EMA. Setting `ema_min_decay` equal to this value will result in a" " constant decay rate." ), ) parser.add_argument( "--use_ema_warmup", action="store_true", help="Whether to use EMA warmup.", ) parser.add_argument("--ema_inv_gamma", type=float, default=1.0, help="The inverse gamma value for the EMA decay.") parser.add_argument("--ema_power", type=float, default=3 / 4, help="The power value for the EMA decay.") # ----Training Optimization Arguments---- parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) # ----Distributed Training Arguments---- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") # ------------Validation Arguments----------- parser.add_argument( "--validation_steps", type=int, default=200, help="Run validation every X steps.", ) parser.add_argument( "--eval_batch_size", type=int, default=16, help=( "The number of images to generate for evaluation. Note that if `class_conditional` and `num_classes` is" " set the effective number of images generated per evaluation step is `eval_batch_size * num_classes`." ), ) parser.add_argument("--save_images_epochs", type=int, default=10, help="How often to save images during training.") # ------------Validation Arguments----------- parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--save_model_epochs", type=int, default=10, help="How often to save the model during training." ) # ------------Logging Arguments----------- parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) # ------------HuggingFace Hub Arguments----------- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--hub_private_repo", action="store_true", help="Whether or not to create a private repository." 
) # ------------Accelerate Arguments----------- parser.add_argument( "--tracker_project_name", type=str, default="consistency-training", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.dataset_name is None and args.train_data_dir is None: raise ValueError("You must specify either a dataset name from the hub or a train data directory.") return args def main(args): logging_dir = os.path.join(args.output_dir, args.logging_dir) if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200)) # a big number for high resolution or big dataset accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, kwargs_handlers=[kwargs], ) if args.report_to == "tensorboard": if not is_tensorboard_available(): raise ImportError("Make sure to install tensorboard if you want to use it for logging during training.") elif args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # 1. Initialize the noise scheduler. initial_discretization_steps = get_discretization_steps( 0, args.max_train_steps, s_0=args.discretization_s_0, s_1=args.discretization_s_1, constant=args.constant_discretization_steps, ) noise_scheduler = CMStochasticIterativeScheduler( num_train_timesteps=initial_discretization_steps, sigma_min=args.sigma_min, sigma_max=args.sigma_max, rho=args.rho, ) # 2. Initialize the student U-Net model. if args.pretrained_model_name_or_path is not None: logger.info(f"Loading pretrained U-Net weights from {args.pretrained_model_name_or_path}... 
") unet = UNet2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) elif args.model_config_name_or_path is None: # TODO: use default architectures from iCT paper if not args.class_conditional and (args.num_classes is not None or args.class_embed_type is not None): logger.warning( f"`--class_conditional` is set to `False` but `--num_classes` is set to {args.num_classes} and" f" `--class_embed_type` is set to {args.class_embed_type}. These values will be overridden to `None`." ) args.num_classes = None args.class_embed_type = None elif args.class_conditional and args.num_classes is None and args.class_embed_type is None: logger.warning( "`--class_conditional` is set to `True` but neither `--num_classes` nor `--class_embed_type` is set." "`class_conditional` will be overridden to `False`." ) args.class_conditional = False unet = UNet2DModel( sample_size=args.resolution, in_channels=3, out_channels=3, layers_per_block=2, block_out_channels=(128, 128, 256, 256, 512, 512), down_block_types=( "DownBlock2D", "DownBlock2D", "DownBlock2D", "DownBlock2D", "AttnDownBlock2D", "DownBlock2D", ), up_block_types=( "UpBlock2D", "AttnUpBlock2D", "UpBlock2D", "UpBlock2D", "UpBlock2D", "UpBlock2D", ), class_embed_type=args.class_embed_type, num_class_embeds=args.num_classes, ) else: config = UNet2DModel.load_config(args.model_config_name_or_path) unet = UNet2DModel.from_config(config) unet.train() # Create EMA for the student U-Net model. if args.use_ema: if args.ema_min_decay is None: args.ema_min_decay = args.ema_max_decay ema_unet = EMAModel( unet.parameters(), decay=args.ema_max_decay, min_decay=args.ema_min_decay, use_ema_warmup=args.use_ema_warmup, inv_gamma=args.ema_inv_gamma, power=args.ema_power, model_cls=UNet2DModel, model_config=unet.config, ) # 3. Initialize the teacher U-Net model from the student U-Net model. # Note that following the improved Consistency Training paper, the teacher U-Net is not updated via EMA (e.g. the # EMA decay rate is 0.) teacher_unet = UNet2DModel.from_config(unet.config) teacher_unet.load_state_dict(unet.state_dict()) teacher_unet.train() teacher_unet.requires_grad_(False) # 4. Handle mixed precision and device placement weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 args.mixed_precision = accelerator.mixed_precision elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 args.mixed_precision = accelerator.mixed_precision # Cast teacher_unet to weight_dtype if cast_teacher is set. if args.cast_teacher: teacher_dtype = weight_dtype else: teacher_dtype = torch.float32 teacher_unet.to(accelerator.device) if args.use_ema: ema_unet.to(accelerator.device) # 5. Handle saving and loading of checkpoints. 
# `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: teacher_unet.save_pretrained(os.path.join(output_dir, "unet_teacher")) if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): load_model = UNet2DModel.from_pretrained(os.path.join(input_dir, "unet_teacher")) teacher_unet.load_state_dict(load_model.state_dict()) teacher_unet.to(accelerator.device) del load_model if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # 6. Enable optimizations if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() teacher_unet.enable_xformers_memory_efficient_attention() if args.use_ema: ema_unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.optimizer_type == "radam": optimizer_class = torch.optim.RAdam elif args.optimizer_type == "adamw": # Use 8-bit Adam for lower memory usage or to fine-tune the model for 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW else: raise ValueError( f"Optimizer type {args.optimizer_type} is not supported. Currently supported optimizer types are `radam`" f" and `adamw`." ) # 7. Initialize the optimizer optimizer = optimizer_class( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # 8. 
Dataset creation and data preprocessing # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, split="train", ) else: dataset = load_dataset("imagefolder", data_dir=args.train_data_dir, cache_dir=args.cache_dir, split="train") # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets and DataLoaders creation. interpolation_mode = resolve_interpolation_mode(args.interpolation_type) augmentations = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=interpolation_mode), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def transform_images(examples): images = [augmentations(image.convert("RGB")) for image in examples[args.dataset_image_column_name]] batch_dict = {"images": images} if args.class_conditional: batch_dict["class_labels"] = examples[args.dataset_class_label_column_name] return batch_dict logger.info(f"Dataset size: {len(dataset)}") dataset.set_transform(transform_images) train_dataloader = torch.utils.data.DataLoader( dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers ) # 9. Initialize the learning rate scheduler # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps, ) # 10. Prepare for training # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) def recalculate_num_discretization_step_values(discretization_steps, skip_steps): """ Recalculates all quantities depending on the number of discretization steps N. 
""" noise_scheduler = CMStochasticIterativeScheduler( num_train_timesteps=discretization_steps, sigma_min=args.sigma_min, sigma_max=args.sigma_max, rho=args.rho, ) current_timesteps = get_karras_sigmas(discretization_steps, args.sigma_min, args.sigma_max, args.rho) valid_teacher_timesteps_plus_one = current_timesteps[: len(current_timesteps) - skip_steps + 1] # timestep_weights are the unnormalized probabilities of sampling the timestep/noise level at each index timestep_weights = get_discretized_lognormal_weights( valid_teacher_timesteps_plus_one, p_mean=args.p_mean, p_std=args.p_std ) # timestep_loss_weights is the timestep-dependent loss weighting schedule lambda(sigma_i) timestep_loss_weights = get_loss_weighting_schedule(valid_teacher_timesteps_plus_one) current_timesteps = current_timesteps.to(accelerator.device) timestep_weights = timestep_weights.to(accelerator.device) timestep_loss_weights = timestep_loss_weights.to(accelerator.device) return noise_scheduler, current_timesteps, timestep_weights, timestep_loss_weights # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) accelerator.init_trackers(args.tracker_project_name, config=tracker_config) # Function for unwraping if torch.compile() was used in accelerate. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 # Resolve the c parameter for the Pseudo-Huber loss if args.huber_c is None: args.huber_c = 0.00054 * args.resolution * math.sqrt(unet.config.in_channels) # Get current number of discretization steps N according to our discretization curriculum current_discretization_steps = get_discretization_steps( initial_global_step, args.max_train_steps, s_0=args.discretization_s_0, s_1=args.discretization_s_1, constant=args.constant_discretization_steps, ) current_skip_steps = get_skip_steps(initial_global_step, initial_skip=args.skip_steps) if current_skip_steps >= current_discretization_steps: raise ValueError( f"The current skip steps is {current_skip_steps}, but should be smaller than the current number of" f" discretization steps {current_discretization_steps}" ) # Recalculate all quantities depending on the number of discretization steps N ( noise_scheduler, current_timesteps, timestep_weights, timestep_loss_weights, ) = recalculate_num_discretization_step_values(current_discretization_steps, current_skip_steps) progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) # 11. Train! for epoch in range(first_epoch, args.num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): # 1. Get batch of images from dataloader (sample x ~ p_data(x)) clean_images = batch["images"].to(weight_dtype) if args.class_conditional: class_labels = batch["class_labels"] else: class_labels = None bsz = clean_images.shape[0] # 2. Sample a random timestep for each image according to the noise schedule. # Sample random indices i ~ p(i), where p(i) is the dicretized lognormal distribution in the iCT paper # NOTE: timestep_indices should be in the range [0, len(current_timesteps) - k - 1] inclusive timestep_indices = torch.multinomial(timestep_weights, bsz, replacement=True).long() teacher_timesteps = current_timesteps[timestep_indices] student_timesteps = current_timesteps[timestep_indices + current_skip_steps] # 3. Sample noise and add it to the clean images for both teacher and student unets # Sample noise z ~ N(0, I) that we'll add to the images noise = torch.randn(clean_images.shape, dtype=weight_dtype, device=clean_images.device) # Add noise to the clean images according to the noise magnitude at each timestep # (this is the forward diffusion process) teacher_noisy_images = add_noise(clean_images, noise, teacher_timesteps) student_noisy_images = add_noise(clean_images, noise, student_timesteps) # 4. Calculate preconditioning and scalings for boundary conditions for the consistency model. 
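            # (Note: the scalings computed below implement the consistency model parameterization
            #  f(x, sigma) = c_skip(sigma) * x + c_out(sigma) * F_theta(c_in(sigma) * x, sigma),
            #  which enforces the boundary condition f(x, sigma_min) = x for both the student and the teacher.)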
teacher_rescaled_timesteps = get_noise_preconditioning(teacher_timesteps, args.noise_precond_type) student_rescaled_timesteps = get_noise_preconditioning(student_timesteps, args.noise_precond_type) c_in_teacher = get_input_preconditioning(teacher_timesteps, input_precond_type=args.input_precond_type) c_in_student = get_input_preconditioning(student_timesteps, input_precond_type=args.input_precond_type) c_skip_teacher, c_out_teacher = scalings_for_boundary_conditions(teacher_timesteps) c_skip_student, c_out_student = scalings_for_boundary_conditions(student_timesteps) c_skip_teacher, c_out_teacher, c_in_teacher = [ append_dims(x, clean_images.ndim) for x in [c_skip_teacher, c_out_teacher, c_in_teacher] ] c_skip_student, c_out_student, c_in_student = [ append_dims(x, clean_images.ndim) for x in [c_skip_student, c_out_student, c_in_student] ] with accelerator.accumulate(unet): # 5. Get the student unet denoising prediction on the student timesteps # Get rng state now to ensure that dropout is synced between the student and teacher models. dropout_state = torch.get_rng_state() student_model_output = unet( c_in_student * student_noisy_images, student_rescaled_timesteps, class_labels=class_labels ).sample # NOTE: currently only support prediction_type == sample, so no need to convert model_output student_denoise_output = c_skip_student * student_noisy_images + c_out_student * student_model_output # 6. Get the teacher unet denoising prediction on the teacher timesteps with torch.no_grad(), torch.autocast("cuda", dtype=teacher_dtype): torch.set_rng_state(dropout_state) teacher_model_output = teacher_unet( c_in_teacher * teacher_noisy_images, teacher_rescaled_timesteps, class_labels=class_labels ).sample # NOTE: currently only support prediction_type == sample, so no need to convert model_output teacher_denoise_output = ( c_skip_teacher * teacher_noisy_images + c_out_teacher * teacher_model_output ) # 7. Calculate the weighted Pseudo-Huber loss if args.prediction_type == "sample": # Note that the loss weights should be those at the (teacher) timestep indices. lambda_t = _extract_into_tensor( timestep_loss_weights, timestep_indices, (bsz,) + (1,) * (clean_images.ndim - 1) ) loss = lambda_t * ( torch.sqrt( (student_denoise_output.float() - teacher_denoise_output.float()) ** 2 + args.huber_c**2 ) - args.huber_c ) loss = loss.mean() else: raise ValueError( f"Unsupported prediction type: {args.prediction_type}. Currently, only `sample` is supported." ) # 8. Backpropagate on the consistency training loss accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: # 9. Update teacher_unet and ema_unet parameters using unet's parameters. teacher_unet.load_state_dict(unet.state_dict()) if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 if accelerator.is_main_process: # 10. Recalculate quantities depending on the global step, if necessary. 
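                    # (iCT uses a discretization curriculum: unless constant_discretization_steps is set, the
                    #  number of noise levels N grows from s_0 to s_1 over training, so whenever N changes the
                    #  Karras sigmas, the timestep sampling weights and the loss weights are rebuilt via
                    #  recalculate_num_discretization_step_values below.)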
new_discretization_steps = get_discretization_steps( global_step, args.max_train_steps, s_0=args.discretization_s_0, s_1=args.discretization_s_1, constant=args.constant_discretization_steps, ) current_skip_steps = get_skip_steps(global_step, initial_skip=args.skip_steps) if current_skip_steps >= new_discretization_steps: raise ValueError( f"The current skip steps is {current_skip_steps}, but should be smaller than the current" f" number of discretization steps {new_discretization_steps}." ) if new_discretization_steps != current_discretization_steps: ( noise_scheduler, current_timesteps, timestep_weights, timestep_loss_weights, ) = recalculate_num_discretization_step_values(new_discretization_steps, current_skip_steps) current_discretization_steps = new_discretization_steps if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step % args.validation_steps == 0: # NOTE: since we do not use EMA for the teacher model, the teacher parameters and student # parameters are the same at this point in time log_validation(unet, noise_scheduler, args, accelerator, weight_dtype, global_step, "teacher") # teacher_unet.to(dtype=teacher_dtype) if args.use_ema: # Store the student unet weights and load the EMA weights. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( unet, noise_scheduler, args, accelerator, weight_dtype, global_step, "ema_student", ) # Restore student unet weights ema_unet.restore(unet.parameters()) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} if args.use_ema: logs["ema_decay"] = ema_unet.cur_decay_value progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # progress_bar.close() accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) pipeline = ConsistencyModelPipeline(unet=unet, scheduler=noise_scheduler) pipeline.save_pretrained(args.output_dir) # If using EMA, save EMA weights as well. if args.use_ema: ema_unet.copy_to(unet.parameters()) unet.save_pretrained(os.path.join(args.output_dir, "ema_unet")) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/research_projects/consistency_training/train_cm_ct_unconditional.py/0
{ "file_path": "diffusers/examples/research_projects/consistency_training/train_cm_ct_unconditional.py", "repo_id": "diffusers", "token_count": 26288 }
108
# Stable Diffusion XL for JAX + TPUv5e

[TPU v5e](https://cloud.google.com/blog/products/compute/how-cloud-tpu-v5e-accelerates-large-scale-ai-inference) is a new generation of TPUs from Google Cloud. It is the most cost-effective, versatile, and scalable Cloud TPU to date, which makes it ideal for serving and scaling large diffusion models.

[JAX](https://github.com/google/jax) is a high-performance numerical computation library that is well-suited to develop and deploy diffusion models:

- **High performance**. All JAX operations are implemented in terms of operations in [XLA](https://www.tensorflow.org/xla/) - the Accelerated Linear Algebra compiler.

- **Compilation**. JAX uses just-in-time (jit) compilation of JAX Python functions so they can be executed efficiently in XLA. In order to get the best performance, we must use static shapes for jitted functions; this is because JAX transforms work by tracing a function to determine its effect on inputs of a specific shape and type. When a new shape is introduced to an already compiled function, it retriggers compilation on the new shape, which can greatly reduce performance. **Note**: JIT compilation is particularly well-suited for text-to-image generation because all inputs and outputs (image input / output sizes) are static.

- **Parallelization**. Workloads can be scaled across multiple devices using JAX's [pmap](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html), which expresses single-program multiple-data (SPMD) programs. Applying pmap to a function compiles it with XLA and then executes it in parallel on XLA devices. For text-to-image generation workloads this means that increasing the number of images rendered simultaneously is straightforward to implement and doesn't compromise performance.

👉 Try it out for yourself: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/google/sdxl)

## Stable Diffusion XL pipeline in JAX

Once you have access to a TPU VM (TPUs newer than version 3), you should first install a TPU-compatible version of JAX:

```
pip install jax[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
```

Next, we can install [flax](https://github.com/google/flax) and the diffusers library:

```
pip install flax diffusers transformers
```

In [sdxl_single.py](./sdxl_single.py) we give a simple example of how to write a text-to-image generation pipeline in JAX using [StabilityAI's Stable Diffusion XL](stabilityai/stable-diffusion-xl-base-1.0).

Let's explain it step-by-step:

**Imports and Setup**

```python
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from diffusers import FlaxStableDiffusionXLPipeline
from jax.experimental.compilation_cache import compilation_cache as cc
cc.initialize_cache("/tmp/sdxl_cache")
import time

NUM_DEVICES = jax.device_count()
```

First, we import the necessary libraries:
- `jax` provides the primitives for TPU operations
- `flax.jax_utils` contains some useful utility functions for `Flax`, a neural network library built on top of JAX
- `diffusers` has all the code that is relevant for SDXL
- We also initialize a cache to speed up the JAX model compilation
- We automatically determine the number of available TPU devices
**1. Downloading Model and Loading Pipeline**

```python
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
)
```

Here, a pre-trained model `stable-diffusion-xl-base-1.0` from the namespace `stabilityai` is loaded. It returns a pipeline for inference and its parameters.

**2. Casting Parameter Types**

```python
scheduler_state = params.pop("scheduler")
params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
params["scheduler"] = scheduler_state
```

This section adjusts the data types of the model parameters. We convert all parameters to `bfloat16` to speed up the computation with model weights. **Note** that the scheduler parameters are **not** converted to `bfloat16`, because the loss in precision would degrade the pipeline's performance too significantly.

**3. Define Inputs to Pipeline**

```python
default_prompt = ...
default_neg_prompt = ...
default_seed = 33
default_guidance_scale = 5.0
default_num_steps = 25
```

Here, various default inputs for the pipeline are set, including the prompt, negative prompt, random seed, guidance scale, and the number of inference steps.

**4. Tokenizing Inputs**

```python
def tokenize_prompt(prompt, neg_prompt):
    prompt_ids = pipeline.prepare_inputs(prompt)
    neg_prompt_ids = pipeline.prepare_inputs(neg_prompt)
    return prompt_ids, neg_prompt_ids
```

This function tokenizes the given prompts. It's essential because the text encoders of SDXL don't understand raw text; they work with numbers, so tokenization converts the text into token ids first.

**5. Parallelization and Replication**

```python
p_params = replicate(params)

def replicate_all(prompt_ids, neg_prompt_ids, seed):
    ...
```

To utilize JAX's parallel capabilities, the parameters and input tensors are duplicated across devices. The `replicate_all` function also ensures that every device produces a different image by creating a unique random seed for each device.

**6. Putting Everything Together**

```python
def generate(...):
    ...
```

This function integrates all the steps to produce the desired outputs from the model. It takes in prompts, tokenizes them, replicates them across devices, runs them through the pipeline, and converts the images to a format that's more interpretable (PIL format). A sketch of what such a function might look like is shown after the summary below.

**7. Compilation Step**

```python
start = time.time()
print(f"Compiling ...")
generate(default_prompt, default_neg_prompt)
print(f"Compiled in {time.time() - start}")
```

The initial run of the `generate` function will be slow because JAX compiles the function during this call. By running it once here, subsequent calls will be much faster. This section measures and prints the compilation time.

**8. Fast Inference**

```python
start = time.time()
prompt = ...
neg_prompt = ...
images = generate(prompt, neg_prompt)
print(f"Inference in {time.time() - start}")
```

Now that the function is compiled, this section shows how to use it for fast inference. It measures and prints the inference time.

In summary, the code demonstrates how to load a pre-trained model using Flax and JAX, prepare it for inference, and run it efficiently using JAX's capabilities.
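For reference, here is a minimal sketch of what such a `generate` function could look like. It is not copied from `sdxl_single.py`; it assumes the `tokenize_prompt` and `replicate_all` helpers above, and it assumes the Flax pipeline call accepts `jit=True` together with the `neg_prompt_ids` and `guidance_scale` keyword arguments, as the other Flax Stable Diffusion pipelines do:

```python
def generate(
    prompt,
    negative_prompt,
    seed=default_seed,
    guidance_scale=default_guidance_scale,
    num_steps=default_num_steps,
):
    # Tokenize on the host, then copy the inputs to every TPU device.
    prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
    prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)

    # Run the sharded pipeline; one image is generated per device.
    images = pipeline(
        prompt_ids,
        p_params,
        rng,
        num_inference_steps=num_steps,
        neg_prompt_ids=neg_prompt_ids,
        guidance_scale=guidance_scale,
        jit=True,
    ).images

    # Collapse the device axis and convert the raw arrays to PIL images.
    images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
    return pipeline.numpy_to_pil(np.array(images))
```

Because the input and output shapes are static, the expensive compilation from step 7 happens only once, and every later call reuses the cached executable.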
## Ahead of Time (AOT) Compilation

`FlaxStableDiffusionXLPipeline` takes care of parallelization across multiple devices using jit. Now let's build the parallelization ourselves.

For this we will use a JAX feature called [Ahead of Time](https://jax.readthedocs.io/en/latest/aot.html) (AOT) lowering and compilation. AOT allows us to fully compile prior to execution time and gives us control over different parts of the compilation process.

In [sdxl_single_aot.py](./sdxl_single_aot.py) we give a simple example of how to write our own parallelization logic for a text-to-image generation pipeline in JAX using [StabilityAI's Stable Diffusion XL](stabilityai/stable-diffusion-xl-base-1.0).

We add an `aot_compile` function that compiles the `pipeline._generate` function, telling JAX which input arguments are static, that is, arguments that are known at compile time and won't change. In our case, these are `num_inference_steps`, `height`, `width` and `return_latents`.

Once the function is compiled, these parameters are omitted from future calls and cannot be changed without modifying the code and recompiling.

```python
def aot_compile(
    prompt=default_prompt,
    negative_prompt=default_neg_prompt,
    seed=default_seed,
    guidance_scale=default_guidance_scale,
    num_inference_steps=default_num_steps,
):
    prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
    prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)
    g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32)
    g = g[:, None]

    return pmap(
        pipeline._generate, static_broadcasted_argnums=[3, 4, 5, 9]
    ).lower(
        prompt_ids,
        p_params,
        rng,
        num_inference_steps,  # num_inference_steps
        height,  # height
        width,  # width
        g,
        None,
        neg_prompt_ids,
        False,  # return_latents
    ).compile()
```

Next we can compile the generate function by executing `aot_compile`.

```python
start = time.time()
print("Compiling ...")
p_generate = aot_compile()
print(f"Compiled in {time.time() - start}")
```

And again we put everything together in a `generate` function.

```python
def generate(
    prompt,
    negative_prompt,
    seed=default_seed,
    guidance_scale=default_guidance_scale,
):
    prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
    prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)
    g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32)
    g = g[:, None]
    images = p_generate(prompt_ids, p_params, rng, g, None, neg_prompt_ids)

    # convert the images to PIL
    images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
    return pipeline.numpy_to_pil(np.array(images))
```

The first forward pass after AOT compilation still takes a while longer than subsequent passes; this is because on the first pass, JAX uses Python dispatch, which fills the C++ dispatch cache. When using jit, this extra step happens automatically, but when using AOT compilation, it doesn't happen until the function is actually called.

```python
start = time.time()
prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang"
neg_prompt = "cartoon, illustration, animation. face. male, female"
images = generate(prompt, neg_prompt)
print(f"First inference in {time.time() - start}")
```

From this point forward, any call to `generate` should run in a consistently faster inference time.

```python
start = time.time()
prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang"
neg_prompt = "cartoon, illustration, animation. face. male, female"
images = generate(prompt, neg_prompt)
print(f"Inference in {time.time() - start}")
```
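One helper that both the jitted and the AOT code paths rely on, but that was only stubbed out above, is `replicate_all`. A minimal sketch of what it might look like, assuming one generated image per device and the `replicate`, `jax`, and `NUM_DEVICES` names from the setup section:

```python
def replicate_all(prompt_ids, neg_prompt_ids, seed):
    # Copy the tokenized prompts to every device (adds a leading device axis).
    p_prompt_ids = replicate(prompt_ids)
    p_neg_prompt_ids = replicate(neg_prompt_ids)

    # Split one PRNG key into one key per device so each device samples
    # different initial latents and therefore produces a different image.
    rng = jax.random.PRNGKey(seed)
    rng = jax.random.split(rng, NUM_DEVICES)
    return p_prompt_ids, p_neg_prompt_ids, rng
```

Splitting the key with `jax.random.split` is what makes each device return a different image for the same prompt, even though the prompts and parameters are identical on every device.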
diffusers/examples/research_projects/sdxl_flax/README.md/0
{ "file_path": "diffusers/examples/research_projects/sdxl_flax/README.md", "repo_id": "diffusers", "token_count": 3342 }
109
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import random import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from transformers.utils import ContextManagers import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "lambdalabs/pokemon-blip-captions": ("image", "text"), } def save_model_card( args, repo_id: str, images: list = None, repo_folder: str = None, ): img_str = "" if len(images) > 0: image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" model_description = f""" # Text-to-image finetuning - {repo_id} This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n {img_str} ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16) prompt = "{args.validation_prompts[0]}" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Epochs: {args.num_train_epochs} * Learning rate: {args.learning_rate} * Batch size: {args.train_batch_size} * Gradient accumulation steps: {args.gradient_accumulation_steps} * Image resolution: {args.resolution} * Mixed-precision: {args.mixed_precision} """ wandb_info = "" if is_wandb_available(): wandb_run_url = None if wandb.run is not None: wandb_run_url = wandb.run.url if wandb_run_url is not None: wandb_info = f""" More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). """ model_description += wandb_info model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=args.pretrained_model_name_or_path, model_description=model_description, inference=True, ) tags = ["stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "diffusers-training"] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... ") pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=accelerator.unwrap_model(vae), text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), safety_checker=None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() return images def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." 
) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that ๐Ÿค— Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. # For this to work properly all models must be run through `accelerate.prepare`. But accelerate # will try to assign the same optimizer with the same weights to all models during # `deepspeed.initialize`, which of course doesn't work. # # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 # frozen models from being partitioned during `zero.Init` which gets called during # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. with ContextManagers(deepspeed_zero_init_disabled_context_manager()): text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision ) # Freeze vae and text_encoder and set unet to trainable vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.train() # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. 
See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for _ in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. 
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 args.mixed_precision = accelerator.mixed_precision elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 args.mixed_precision = accelerator.mixed_precision # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Function for unwrapping if model was compiled with `torch.compile`. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (latents.shape[0], latents.shape[1], 1, 1), device=latents.device ) if args.input_perturbation: new_noise = noise + args.input_perturbation * torch.randn_like(noise) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) if args.input_perturbation: noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) else: noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) # Run a final round of inference. 
images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline = pipeline.to(accelerator.device) pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/text_to_image/train_text_to_image.py/0
{ "file_path": "diffusers/examples/text_to_image/train_text_to_image.py", "repo_id": "diffusers", "token_count": 19404 }
110
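The training script above finishes by saving a complete `StableDiffusionPipeline` to `args.output_dir` (and optionally pushing it to the Hub). Below is a minimal reload-and-infer sketch, not part of the original script; the output directory name and the prompt are illustrative assumptions.

import torch
from diffusers import StableDiffusionPipeline

# "sd-model-finetuned" is an assumed local directory (whatever was passed as --output_dir during training).
pipe = StableDiffusionPipeline.from_pretrained("sd-model-finetuned", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Placeholder prompt; use one that matches the fine-tuning dataset.
image = pipe("a photo in the style of the fine-tuning dataset", num_inference_steps=20).images[0]
image.save("sample.png")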
""" This script requires you to build `LAVIS` from source, since the pip version doesn't have BLIP Diffusion. Follow instructions here: https://github.com/salesforce/LAVIS/tree/main. """ import argparse import os import tempfile import torch from lavis.models import load_model_and_preprocess from transformers import CLIPTokenizer from transformers.models.blip_2.configuration_blip_2 import Blip2Config from diffusers import ( AutoencoderKL, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines import BlipDiffusionPipeline from diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor from diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel from diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel BLIP2_CONFIG = { "vision_config": { "hidden_size": 1024, "num_hidden_layers": 23, "num_attention_heads": 16, "image_size": 224, "patch_size": 14, "intermediate_size": 4096, "hidden_act": "quick_gelu", }, "qformer_config": { "cross_attention_frequency": 1, "encoder_hidden_size": 1024, "vocab_size": 30523, }, "num_query_tokens": 16, } blip2config = Blip2Config(**BLIP2_CONFIG) def qformer_model_from_original_config(): qformer = Blip2QFormerModel(blip2config) return qformer def embeddings_from_original_checkpoint(model, diffuser_embeddings_prefix, original_embeddings_prefix): embeddings = {} embeddings.update( { f"{diffuser_embeddings_prefix}.word_embeddings.weight": model[ f"{original_embeddings_prefix}.word_embeddings.weight" ] } ) embeddings.update( { f"{diffuser_embeddings_prefix}.position_embeddings.weight": model[ f"{original_embeddings_prefix}.position_embeddings.weight" ] } ) embeddings.update( {f"{diffuser_embeddings_prefix}.LayerNorm.weight": model[f"{original_embeddings_prefix}.LayerNorm.weight"]} ) embeddings.update( {f"{diffuser_embeddings_prefix}.LayerNorm.bias": model[f"{original_embeddings_prefix}.LayerNorm.bias"]} ) return embeddings def proj_layer_from_original_checkpoint(model, diffuser_proj_prefix, original_proj_prefix): proj_layer = {} proj_layer.update({f"{diffuser_proj_prefix}.dense1.weight": model[f"{original_proj_prefix}.dense1.weight"]}) proj_layer.update({f"{diffuser_proj_prefix}.dense1.bias": model[f"{original_proj_prefix}.dense1.bias"]}) proj_layer.update({f"{diffuser_proj_prefix}.dense2.weight": model[f"{original_proj_prefix}.dense2.weight"]}) proj_layer.update({f"{diffuser_proj_prefix}.dense2.bias": model[f"{original_proj_prefix}.dense2.bias"]}) proj_layer.update({f"{diffuser_proj_prefix}.LayerNorm.weight": model[f"{original_proj_prefix}.LayerNorm.weight"]}) proj_layer.update({f"{diffuser_proj_prefix}.LayerNorm.bias": model[f"{original_proj_prefix}.LayerNorm.bias"]}) return proj_layer def attention_from_original_checkpoint(model, diffuser_attention_prefix, original_attention_prefix): attention = {} attention.update( { f"{diffuser_attention_prefix}.attention.query.weight": model[ f"{original_attention_prefix}.self.query.weight" ] } ) attention.update( {f"{diffuser_attention_prefix}.attention.query.bias": model[f"{original_attention_prefix}.self.query.bias"]} ) attention.update( {f"{diffuser_attention_prefix}.attention.key.weight": model[f"{original_attention_prefix}.self.key.weight"]} ) attention.update( {f"{diffuser_attention_prefix}.attention.key.bias": model[f"{original_attention_prefix}.self.key.bias"]} ) attention.update( { f"{diffuser_attention_prefix}.attention.value.weight": model[ f"{original_attention_prefix}.self.value.weight" ] } ) attention.update( 
{f"{diffuser_attention_prefix}.attention.value.bias": model[f"{original_attention_prefix}.self.value.bias"]} ) attention.update( {f"{diffuser_attention_prefix}.output.dense.weight": model[f"{original_attention_prefix}.output.dense.weight"]} ) attention.update( {f"{diffuser_attention_prefix}.output.dense.bias": model[f"{original_attention_prefix}.output.dense.bias"]} ) attention.update( { f"{diffuser_attention_prefix}.output.LayerNorm.weight": model[ f"{original_attention_prefix}.output.LayerNorm.weight" ] } ) attention.update( { f"{diffuser_attention_prefix}.output.LayerNorm.bias": model[ f"{original_attention_prefix}.output.LayerNorm.bias" ] } ) return attention def output_layers_from_original_checkpoint(model, diffuser_output_prefix, original_output_prefix): output_layers = {} output_layers.update({f"{diffuser_output_prefix}.dense.weight": model[f"{original_output_prefix}.dense.weight"]}) output_layers.update({f"{diffuser_output_prefix}.dense.bias": model[f"{original_output_prefix}.dense.bias"]}) output_layers.update( {f"{diffuser_output_prefix}.LayerNorm.weight": model[f"{original_output_prefix}.LayerNorm.weight"]} ) output_layers.update( {f"{diffuser_output_prefix}.LayerNorm.bias": model[f"{original_output_prefix}.LayerNorm.bias"]} ) return output_layers def encoder_from_original_checkpoint(model, diffuser_encoder_prefix, original_encoder_prefix): encoder = {} for i in range(blip2config.qformer_config.num_hidden_layers): encoder.update( attention_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.attention", f"{original_encoder_prefix}.{i}.attention" ) ) encoder.update( attention_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.crossattention", f"{original_encoder_prefix}.{i}.crossattention" ) ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate.dense.weight": model[ f"{original_encoder_prefix}.{i}.intermediate.dense.weight" ] } ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate.dense.bias": model[ f"{original_encoder_prefix}.{i}.intermediate.dense.bias" ] } ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate_query.dense.weight": model[ f"{original_encoder_prefix}.{i}.intermediate_query.dense.weight" ] } ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate_query.dense.bias": model[ f"{original_encoder_prefix}.{i}.intermediate_query.dense.bias" ] } ) encoder.update( output_layers_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.output", f"{original_encoder_prefix}.{i}.output" ) ) encoder.update( output_layers_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.output_query", f"{original_encoder_prefix}.{i}.output_query" ) ) return encoder def visual_encoder_layer_from_original_checkpoint(model, diffuser_prefix, original_prefix): visual_encoder_layer = {} visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm1.weight": model[f"{original_prefix}.ln_1.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm1.bias": model[f"{original_prefix}.ln_1.bias"]}) visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm2.weight": model[f"{original_prefix}.ln_2.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm2.bias": model[f"{original_prefix}.ln_2.bias"]}) visual_encoder_layer.update( {f"{diffuser_prefix}.self_attn.qkv.weight": model[f"{original_prefix}.attn.in_proj_weight"]} ) visual_encoder_layer.update( {f"{diffuser_prefix}.self_attn.qkv.bias": model[f"{original_prefix}.attn.in_proj_bias"]} ) visual_encoder_layer.update( 
{f"{diffuser_prefix}.self_attn.projection.weight": model[f"{original_prefix}.attn.out_proj.weight"]} ) visual_encoder_layer.update( {f"{diffuser_prefix}.self_attn.projection.bias": model[f"{original_prefix}.attn.out_proj.bias"]} ) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc1.weight": model[f"{original_prefix}.mlp.c_fc.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc1.bias": model[f"{original_prefix}.mlp.c_fc.bias"]}) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc2.weight": model[f"{original_prefix}.mlp.c_proj.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc2.bias": model[f"{original_prefix}.mlp.c_proj.bias"]}) return visual_encoder_layer def visual_encoder_from_original_checkpoint(model, diffuser_prefix, original_prefix): visual_encoder = {} visual_encoder.update( { f"{diffuser_prefix}.embeddings.class_embedding": model[f"{original_prefix}.class_embedding"] .unsqueeze(0) .unsqueeze(0) } ) visual_encoder.update( { f"{diffuser_prefix}.embeddings.position_embedding": model[ f"{original_prefix}.positional_embedding" ].unsqueeze(0) } ) visual_encoder.update( {f"{diffuser_prefix}.embeddings.patch_embedding.weight": model[f"{original_prefix}.conv1.weight"]} ) visual_encoder.update({f"{diffuser_prefix}.pre_layernorm.weight": model[f"{original_prefix}.ln_pre.weight"]}) visual_encoder.update({f"{diffuser_prefix}.pre_layernorm.bias": model[f"{original_prefix}.ln_pre.bias"]}) for i in range(blip2config.vision_config.num_hidden_layers): visual_encoder.update( visual_encoder_layer_from_original_checkpoint( model, f"{diffuser_prefix}.encoder.layers.{i}", f"{original_prefix}.transformer.resblocks.{i}" ) ) visual_encoder.update({f"{diffuser_prefix}.post_layernorm.weight": model["blip.ln_vision.weight"]}) visual_encoder.update({f"{diffuser_prefix}.post_layernorm.bias": model["blip.ln_vision.bias"]}) return visual_encoder def qformer_original_checkpoint_to_diffusers_checkpoint(model): qformer_checkpoint = {} qformer_checkpoint.update(embeddings_from_original_checkpoint(model, "embeddings", "blip.Qformer.bert.embeddings")) qformer_checkpoint.update({"query_tokens": model["blip.query_tokens"]}) qformer_checkpoint.update(proj_layer_from_original_checkpoint(model, "proj_layer", "proj_layer")) qformer_checkpoint.update( encoder_from_original_checkpoint(model, "encoder.layer", "blip.Qformer.bert.encoder.layer") ) qformer_checkpoint.update(visual_encoder_from_original_checkpoint(model, "visual_encoder", "blip.visual_encoder")) return qformer_checkpoint def get_qformer(model): print("loading qformer") qformer = qformer_model_from_original_config() qformer_diffusers_checkpoint = qformer_original_checkpoint_to_diffusers_checkpoint(model) load_checkpoint_to_model(qformer_diffusers_checkpoint, qformer) print("done loading qformer") return qformer def load_checkpoint_to_model(checkpoint, model): with tempfile.NamedTemporaryFile(delete=False) as file: torch.save(checkpoint, file.name) del checkpoint model.load_state_dict(torch.load(file.name), strict=False) os.remove(file.name) def save_blip_diffusion_model(model, args): qformer = get_qformer(model) qformer.eval() text_encoder = ContextCLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder") vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae") unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet") vae.eval() text_encoder.eval() scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, 
beta_schedule="scaled_linear", set_alpha_to_one=False, skip_prk_steps=True, ) tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer") image_processor = BlipImageProcessor() blip_diffusion = BlipDiffusionPipeline( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, image_processor=image_processor, ) blip_diffusion.save_pretrained(args.checkpoint_path) def main(args): model, _, _ = load_model_and_preprocess("blip_diffusion", "base", device="cpu", is_eval=True) save_blip_diffusion_model(model.state_dict(), args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() main(args)
diffusers/scripts/convert_blipdiffusion_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_blipdiffusion_to_diffusers.py", "repo_id": "diffusers", "token_count": 5920 }
111
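The conversion script above rebuilds a `BlipDiffusionPipeline` from the original LAVIS weights and writes it with `save_pretrained(args.checkpoint_path)`. A minimal sketch of invoking it and sanity-checking the result; the output path below is an illustrative assumption, not taken from the script.

# Assumed invocation (the script's argparse only defines --checkpoint_path):
#   python convert_blipdiffusion_to_diffusers.py --checkpoint_path ./blip-diffusion-converted

from diffusers.pipelines import BlipDiffusionPipeline

# Reload the converted pipeline from the assumed output directory and list its registered modules
# (e.g. qformer, unet, vae, text_encoder, scheduler, tokenizer, image_processor per the constructor above).
pipe = BlipDiffusionPipeline.from_pretrained("./blip-diffusion-converted")
print(sorted(pipe.components.keys()))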
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the LDM checkpoints.""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("proj_out.weight", "proj_attn.weight") new_item = new_item.replace("proj_out.bias", "proj_attn.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def convert_ldm_checkpoint(checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. """ new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"] new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"] new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"] new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["num_res_blocks"] + 1) layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1) resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in checkpoint: 
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[ f"input_blocks.{i}.0.op.weight" ] new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[ f"input_blocks.{i}.0.op.bias" ] continue paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}", } to_split = { f"input_blocks.{i}.1.qkv.bias": { "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"input_blocks.{i}.1.qkv.weight": { "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config) attentions_paths = renew_attention_paths(attentions) to_split = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config ) for i in range(num_output_blocks): block_id = i // (config["num_res_blocks"] + 1) layer_in_block_id = i % (config["num_res_blocks"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config) if ["conv.weight", "conv.bias"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.weight", "conv.bias"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[ f"output_blocks.{i}.{index}.conv.weight" ] 
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } to_split = { f"output_blocks.{i}.1.qkv.bias": { "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"output_blocks.{i}.1.qkv.weight": { "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() checkpoint = torch.load(args.checkpoint_path) with open(args.config_file) as f: config = json.loads(f.read()) converted_checkpoint = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] model = UNet2DModel(**config) model.load_state_dict(converted_checkpoint) try: scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py", "repo_id": "diffusers", "token_count": 6854 }
112
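The LDM conversion script above consumes the original checkpoint plus a UNet config JSON and writes either a full `LDMPipeline` (when a scheduler and VQ-VAE sit next to the checkpoint) or, as a fallback, only the converted `UNet2DModel`. A short usage sketch; every path below is an illustrative assumption.

# Assumed invocation:
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/unet_config.json \
#       --dump_path ./ldm-converted

from diffusers import LDMPipeline, UNet2DModel

# Full-pipeline case: a scheduler and VQModel were found next to the checkpoint.
pipe = LDMPipeline.from_pretrained("./ldm-converted")
image = pipe(num_inference_steps=50).images[0]
image.save("ldm_sample.png")

# Model-only fallback case (uncomment if only the UNet was saved):
# unet = UNet2DModel.from_pretrained("./ldm-converted")
# print(unet.config)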
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def onnx_export( model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ): output_path.parent.mkdir(parents=True, exist_ok=True) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, ) else: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, ) @torch.no_grad() def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False): dtype = torch.float16 if fp16 else torch.float32 if fp16 and torch.cuda.is_available(): device = "cuda" elif fp16 and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA") else: device = "cpu" pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device) output_path = Path(output_path) # TEXT ENCODER num_tokens = pipeline.text_encoder.config.max_position_embeddings text_hidden_size = pipeline.text_encoder.config.hidden_size text_input = pipeline.tokenizer( "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) onnx_export( pipeline.text_encoder, # casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, }, opset=opset, ) del pipeline.text_encoder # UNET unet_in_channels = pipeline.unet.config.in_channels unet_sample_size = pipeline.unet.config.sample_size unet_path = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet, model_args=( torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype), torch.randn(2).to(device=device, dtype=dtype), torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype), False, ), output_path=unet_path, ordered_input_names=["sample", 
"timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], # has to be different from "sample" for correct tracing dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, }, opset=opset, use_external_data_format=True, # UNet is > 2GB, so the weights need to be split ) unet_model_path = str(unet_path.absolute().as_posix()) unet_dir = os.path.dirname(unet_model_path) unet = onnx.load(unet_model_path) # clean up existing tensor files shutil.rmtree(unet_dir) os.mkdir(unet_dir) # collate external tensor files into one onnx.save_model( unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False, ) del pipeline.unet # VAE ENCODER vae_encoder = pipeline.vae vae_in_channels = vae_encoder.config.in_channels vae_sample_size = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample() onnx_export( vae_encoder, model_args=( torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype), False, ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=opset, ) # VAE DECODER vae_decoder = pipeline.vae vae_latent_channels = vae_decoder.config.latent_channels vae_out_channels = vae_decoder.config.out_channels # forward only through the decoder part vae_decoder.forward = vae_encoder.decode onnx_export( vae_decoder, model_args=( torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype), False, ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=opset, ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: safety_checker = pipeline.safety_checker clip_num_channels = safety_checker.config.vision_config.num_channels clip_image_size = safety_checker.config.vision_config.image_size safety_checker.forward = safety_checker.forward_onnx onnx_export( pipeline.safety_checker, model_args=( torch.randn( 1, clip_num_channels, clip_image_size, clip_image_size, ).to(device=device, dtype=dtype), torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype), ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, }, opset=opset, ) del pipeline.safety_checker safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker") feature_extractor = pipeline.feature_extractor else: safety_checker = None feature_extractor = None onnx_pipeline = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path 
/ "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, ) onnx_pipeline.save_pretrained(output_path) print("ONNX pipeline saved to", output_path) del pipeline del onnx_pipeline _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider") print("ONNX pipeline is loadable") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") args = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fp16)
diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py/0
{ "file_path": "diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py", "repo_id": "diffusers", "token_count": 4384 }
113
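The export script above already verifies that the ONNX pipeline is loadable with the CPU execution provider. A minimal inference sketch against the exported directory; the path and prompt are illustrative assumptions.

from diffusers import OnnxStableDiffusionPipeline

# "./sd-onnx" is an assumed value for the --output_path used during export.
pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd-onnx", provider="CPUExecutionProvider")
image = pipe("an astronaut riding a horse on the moon", num_inference_steps=20).images[0]
image.save("onnx_sample.png")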
__version__ = "0.28.0.dev0" from typing import TYPE_CHECKING from .utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_k_diffusion_available, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, ) # Lazy Import based on # https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py # When adding a new object to this init, please add it to `_import_structure`. The `_import_structure` is a dictionary submodule to list of object names, # and is used to defer the actual importing for when the objects are requested. # This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends). _import_structure = { "configuration_utils": ["ConfigMixin"], "models": [], "pipelines": [], "schedulers": [], "utils": [ "OptionalDependencyNotAvailable", "is_flax_available", "is_inflect_available", "is_invisible_watermark_available", "is_k_diffusion_available", "is_k_diffusion_version", "is_librosa_available", "is_note_seq_available", "is_onnx_available", "is_scipy_available", "is_torch_available", "is_torchsde_available", "is_transformers_available", "is_transformers_version", "is_unidecode_available", "logging", ], } try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_onnx_objects # noqa F403 _import_structure["utils.dummy_onnx_objects"] = [ name for name in dir(dummy_onnx_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend(["OnnxRuntimeModel"]) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_pt_objects # noqa F403 _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")] else: _import_structure["models"].extend( [ "AsymmetricAutoencoderKL", "AutoencoderKL", "AutoencoderKLTemporalDecoder", "AutoencoderTiny", "ConsistencyDecoderVAE", "ControlNetModel", "I2VGenXLUNet", "Kandinsky3UNet", "ModelMixin", "MotionAdapter", "MultiAdapter", "PriorTransformer", "StableCascadeUNet", "T2IAdapter", "T5FilmDecoder", "Transformer2DModel", "UNet1DModel", "UNet2DConditionModel", "UNet2DModel", "UNet3DConditionModel", "UNetMotionModel", "UNetSpatioTemporalConditionModel", "UVit2DModel", "VQModel", ] ) _import_structure["optimization"] = [ "get_constant_schedule", "get_constant_schedule_with_warmup", "get_cosine_schedule_with_warmup", "get_cosine_with_hard_restarts_schedule_with_warmup", "get_linear_schedule_with_warmup", "get_polynomial_decay_schedule_with_warmup", "get_scheduler", ] _import_structure["pipelines"].extend( [ "AudioPipelineOutput", "AutoPipelineForImage2Image", "AutoPipelineForInpainting", "AutoPipelineForText2Image", "ConsistencyModelPipeline", "DanceDiffusionPipeline", "DDIMPipeline", "DDPMPipeline", "DiffusionPipeline", "DiTPipeline", "ImagePipelineOutput", "KarrasVePipeline", "LDMPipeline", "LDMSuperResolutionPipeline", "PNDMPipeline", "RePaintPipeline", "ScoreSdeVePipeline", "StableDiffusionMixin", ] ) _import_structure["schedulers"].extend( [ "AmusedScheduler", "CMStochasticIterativeScheduler", "DDIMInverseScheduler", "DDIMParallelScheduler", "DDIMScheduler", "DDPMParallelScheduler", "DDPMScheduler", "DDPMWuerstchenScheduler", "DEISMultistepScheduler", "DPMSolverMultistepInverseScheduler", 
"DPMSolverMultistepScheduler", "DPMSolverSinglestepScheduler", "EDMDPMSolverMultistepScheduler", "EDMEulerScheduler", "EulerAncestralDiscreteScheduler", "EulerDiscreteScheduler", "HeunDiscreteScheduler", "IPNDMScheduler", "KarrasVeScheduler", "KDPM2AncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "LCMScheduler", "PNDMScheduler", "RePaintScheduler", "SASolverScheduler", "SchedulerMixin", "ScoreSdeVeScheduler", "TCDScheduler", "UnCLIPScheduler", "UniPCMultistepScheduler", "VQDiffusionScheduler", ] ) _import_structure["training_utils"] = ["EMAModel"] try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_scipy_objects # noqa F403 _import_structure["utils.dummy_torch_and_scipy_objects"] = [ name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_") ] else: _import_structure["schedulers"].extend(["LMSDiscreteScheduler"]) try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_torchsde_objects # noqa F403 _import_structure["utils.dummy_torch_and_torchsde_objects"] = [ name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_") ] else: _import_structure["schedulers"].extend(["DPMSolverSDEScheduler"]) try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_objects # noqa F403 _import_structure["utils.dummy_torch_and_transformers_objects"] = [ name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend( [ "AltDiffusionImg2ImgPipeline", "AltDiffusionPipeline", "AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline", "AnimateDiffPipeline", "AnimateDiffVideoToVideoPipeline", "AudioLDM2Pipeline", "AudioLDM2ProjectionModel", "AudioLDM2UNet2DConditionModel", "AudioLDMPipeline", "BlipDiffusionControlNetPipeline", "BlipDiffusionPipeline", "CLIPImageProjection", "CycleDiffusionPipeline", "I2VGenXLPipeline", "IFImg2ImgPipeline", "IFImg2ImgSuperResolutionPipeline", "IFInpaintingPipeline", "IFInpaintingSuperResolutionPipeline", "IFPipeline", "IFSuperResolutionPipeline", "ImageTextPipelineOutput", "Kandinsky3Img2ImgPipeline", "Kandinsky3Pipeline", "KandinskyCombinedPipeline", "KandinskyImg2ImgCombinedPipeline", "KandinskyImg2ImgPipeline", "KandinskyInpaintCombinedPipeline", "KandinskyInpaintPipeline", "KandinskyPipeline", "KandinskyPriorPipeline", "KandinskyV22CombinedPipeline", "KandinskyV22ControlnetImg2ImgPipeline", "KandinskyV22ControlnetPipeline", "KandinskyV22Img2ImgCombinedPipeline", "KandinskyV22Img2ImgPipeline", "KandinskyV22InpaintCombinedPipeline", "KandinskyV22InpaintPipeline", "KandinskyV22Pipeline", "KandinskyV22PriorEmb2EmbPipeline", "KandinskyV22PriorPipeline", "LatentConsistencyModelImg2ImgPipeline", "LatentConsistencyModelPipeline", "LDMTextToImagePipeline", "LEditsPPPipelineStableDiffusion", "LEditsPPPipelineStableDiffusionXL", "MusicLDMPipeline", "PaintByExamplePipeline", "PIAPipeline", "PixArtAlphaPipeline", "SemanticStableDiffusionPipeline", "ShapEImg2ImgPipeline", "ShapEPipeline", "StableCascadeCombinedPipeline", "StableCascadeDecoderPipeline", "StableCascadePriorPipeline", "StableDiffusionAdapterPipeline", "StableDiffusionAttendAndExcitePipeline", 
"StableDiffusionControlNetImg2ImgPipeline", "StableDiffusionControlNetInpaintPipeline", "StableDiffusionControlNetPipeline", "StableDiffusionDepth2ImgPipeline", "StableDiffusionDiffEditPipeline", "StableDiffusionGLIGENPipeline", "StableDiffusionGLIGENTextImagePipeline", "StableDiffusionImageVariationPipeline", "StableDiffusionImg2ImgPipeline", "StableDiffusionInpaintPipeline", "StableDiffusionInpaintPipelineLegacy", "StableDiffusionInstructPix2PixPipeline", "StableDiffusionLatentUpscalePipeline", "StableDiffusionLDM3DPipeline", "StableDiffusionModelEditingPipeline", "StableDiffusionPanoramaPipeline", "StableDiffusionParadigmsPipeline", "StableDiffusionPipeline", "StableDiffusionPipelineSafe", "StableDiffusionPix2PixZeroPipeline", "StableDiffusionSAGPipeline", "StableDiffusionUpscalePipeline", "StableDiffusionXLAdapterPipeline", "StableDiffusionXLControlNetImg2ImgPipeline", "StableDiffusionXLControlNetInpaintPipeline", "StableDiffusionXLControlNetPipeline", "StableDiffusionXLImg2ImgPipeline", "StableDiffusionXLInpaintPipeline", "StableDiffusionXLInstructPix2PixPipeline", "StableDiffusionXLPipeline", "StableUnCLIPImg2ImgPipeline", "StableUnCLIPPipeline", "StableVideoDiffusionPipeline", "TextToVideoSDPipeline", "TextToVideoZeroPipeline", "TextToVideoZeroSDXLPipeline", "UnCLIPImageVariationPipeline", "UnCLIPPipeline", "UniDiffuserModel", "UniDiffuserPipeline", "UniDiffuserTextDecoder", "VersatileDiffusionDualGuidedPipeline", "VersatileDiffusionImageVariationPipeline", "VersatileDiffusionPipeline", "VersatileDiffusionTextToImagePipeline", "VideoToVideoSDPipeline", "VQDiffusionPipeline", "WuerstchenCombinedPipeline", "WuerstchenDecoderPipeline", "WuerstchenPriorPipeline", ] ) try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 _import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [ name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"]) try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403 _import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [ name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend( [ "OnnxStableDiffusionImg2ImgPipeline", "OnnxStableDiffusionInpaintPipeline", "OnnxStableDiffusionInpaintPipelineLegacy", "OnnxStableDiffusionPipeline", "OnnxStableDiffusionUpscalePipeline", "StableDiffusionOnnxPipeline", ] ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_librosa_objects # noqa F403 _import_structure["utils.dummy_torch_and_librosa_objects"] = [ name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"]) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: from .utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 _import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [ name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"]) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_flax_objects # noqa F403 _import_structure["utils.dummy_flax_objects"] = [ name for name in dir(dummy_flax_objects) if not name.startswith("_") ] else: _import_structure["models.controlnet_flax"] = ["FlaxControlNetModel"] _import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"] _import_structure["models.unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] _import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"] _import_structure["pipelines"].extend(["FlaxDiffusionPipeline"]) _import_structure["schedulers"].extend( [ "FlaxDDIMScheduler", "FlaxDDPMScheduler", "FlaxDPMSolverMultistepScheduler", "FlaxEulerDiscreteScheduler", "FlaxKarrasVeScheduler", "FlaxLMSDiscreteScheduler", "FlaxPNDMScheduler", "FlaxSchedulerMixin", "FlaxScoreSdeVeScheduler", ] ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_flax_and_transformers_objects # noqa F403 _import_structure["utils.dummy_flax_and_transformers_objects"] = [ name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend( [ "FlaxStableDiffusionControlNetPipeline", "FlaxStableDiffusionImg2ImgPipeline", "FlaxStableDiffusionInpaintPipeline", "FlaxStableDiffusionPipeline", "FlaxStableDiffusionXLPipeline", ] ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_note_seq_objects # noqa F403 _import_structure["utils.dummy_note_seq_objects"] = [ name for name in dir(dummy_note_seq_objects) if not name.startswith("_") ] else: _import_structure["pipelines"].extend(["MidiProcessor"]) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .configuration_utils import ConfigMixin try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderKLTemporalDecoder, AutoencoderTiny, ConsistencyDecoderVAE, ControlNetModel, I2VGenXLUNet, Kandinsky3UNet, ModelMixin, MotionAdapter, MultiAdapter, PriorTransformer, T2IAdapter, T5FilmDecoder, Transformer2DModel, UNet1DModel, UNet2DConditionModel, UNet2DModel, UNet3DConditionModel, UNetMotionModel, UNetSpatioTemporalConditionModel, UVit2DModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, 
BlipDiffusionControlNetPipeline, BlipDiffusionPipeline, CLIPImageProjection, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, StableDiffusionMixin, ) from .schedulers import ( AmusedScheduler, CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DDPMWuerstchenScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EDMDPMSolverMultistepScheduler, EDMEulerScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPM2AncestralDiscreteScheduler, KDPM2DiscreteScheduler, LCMScheduler, PNDMScheduler, RePaintScheduler, SASolverScheduler, SchedulerMixin, ScoreSdeVeScheduler, TCDScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline, AnimateDiffPipeline, AnimateDiffVideoToVideoPipeline, AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AudioLDMPipeline, CLIPImageProjection, CycleDiffusionPipeline, I2VGenXLPipeline, IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline, KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyImg2ImgPipeline, KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyV22CombinedPipeline, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22ControlnetPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22Img2ImgPipeline, KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, LDMTextToImagePipeline, LEditsPPPipelineStableDiffusion, LEditsPPPipelineStableDiffusionXL, MusicLDMPipeline, PaintByExamplePipeline, PIAPipeline, PixArtAlphaPipeline, SemanticStableDiffusionPipeline, ShapEImg2ImgPipeline, ShapEPipeline, StableCascadeCombinedPipeline, StableCascadeDecoderPipeline, StableCascadePriorPipeline, StableDiffusionAdapterPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepth2ImgPipeline, 
StableDiffusionDiffEditPipeline, StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline, StableDiffusionImageVariationPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPix2PixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDM3DPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPix2PixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableDiffusionXLAdapterPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLInstructPix2PixPipeline, StableDiffusionXLPipeline, StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline, StableVideoDiffusionPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, TextToVideoZeroSDXLPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImg2ImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unets.unet_2d_condition_flax import FlaxUNet2DConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxEulerDiscreteScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, 
FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImg2ImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, FlaxStableDiffusionXLPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, extra_objects={"__version__": __version__}, )
diffusers/src/diffusers/__init__.py/0
{ "file_path": "diffusers/src/diffusers/__init__.py", "repo_id": "diffusers", "token_count": 14100 }
114
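The `__init__.py` above registers every public name in `_import_structure` and routes module access through `_LazyModule`, so `import diffusers` stays cheap and the heavy backends are only imported when a name is first touched. A small illustrative sketch of that behavior (assumes `diffusers` and `torch` are installed):

import diffusers

print(diffusers.__version__)                  # served from `extra_objects`, no submodule import needed
pipeline_cls = diffusers.DiffusionPipeline    # first attribute access triggers the real pipelines import
print(pipeline_cls.__module__)                # e.g. "diffusers.pipelines.pipeline_utils"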
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from typing import Dict, List, Optional, Union import torch from huggingface_hub.utils import validate_hf_hub_args from safetensors import safe_open from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict from ..utils import ( _get_model_file, is_accelerate_available, is_torch_version, is_transformers_available, logging, ) if is_transformers_available(): from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, ) from ..models.attention_processor import ( IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, ) logger = logging.get_logger(__name__) class IPAdapterMixin: """Mixin for handling IP Adapters.""" @validate_hf_hub_args def load_ip_adapter( self, pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]], subfolder: Union[str, List[str]], weight_name: Union[str, List[str]], image_encoder_folder: Optional[str] = "image_encoder", **kwargs, ): """ Parameters: pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved with [`ModelMixin.save_pretrained`]. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). subfolder (`str` or `List[str]`): The subfolder location of a model file within a larger model repository on the Hub or locally. If a list is passed, it should have the same length as `weight_name`. weight_name (`str` or `List[str]`): The name of the weight file to load. If a list is passed, it should have the same length as `weight_name`. image_encoder_folder (`str`, *optional*, defaults to `image_encoder`): The subfolder location of the image encoder within a larger model repository on the Hub or locally. Pass `None` to not load the image encoder. If the image encoder is located in a folder inside `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g. `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than `subfolder`, you should pass the path to the folder that contains image encoder weights, for example, `image_encoder_folder="different_subfolder/image_encoder"`. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. 
If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. """ # handle the list inputs for multiple IP Adapters if not isinstance(weight_name, list): weight_name = [weight_name] if not isinstance(pretrained_model_name_or_path_or_dict, list): pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] if len(pretrained_model_name_or_path_or_dict) == 1: pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) if not isinstance(subfolder, list): subfolder = [subfolder] if len(subfolder) == 1: subfolder = subfolder * len(weight_name) if len(weight_name) != len(pretrained_model_name_or_path_or_dict): raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.") if len(weight_name) != len(subfolder): raise ValueError("`weight_name` and `subfolder` must have the same length.") # Load the main state dict first. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." 
) user_agent = { "file_type": "attn_procs_weights", "framework": "pytorch", } state_dicts = [] for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip( pretrained_model_name_or_path_or_dict, weight_name, subfolder ): if not isinstance(pretrained_model_name_or_path_or_dict, dict): model_file = _get_model_file( pretrained_model_name_or_path_or_dict, weights_name=weight_name, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, ) if weight_name.endswith(".safetensors"): state_dict = {"image_proj": {}, "ip_adapter": {}} with safe_open(model_file, framework="pt", device="cpu") as f: for key in f.keys(): if key.startswith("image_proj."): state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key) elif key.startswith("ip_adapter."): state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key) else: state_dict = load_state_dict(model_file) else: state_dict = pretrained_model_name_or_path_or_dict keys = list(state_dict.keys()) if keys != ["image_proj", "ip_adapter"]: raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.") state_dicts.append(state_dict) # load CLIP image encoder here if it has not been registered to the pipeline yet if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None: if image_encoder_folder is not None: if not isinstance(pretrained_model_name_or_path_or_dict, dict): logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}") if image_encoder_folder.count("/") == 0: image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix() else: image_encoder_subfolder = Path(image_encoder_folder).as_posix() image_encoder = CLIPVisionModelWithProjection.from_pretrained( pretrained_model_name_or_path_or_dict, subfolder=image_encoder_subfolder, low_cpu_mem_usage=low_cpu_mem_usage, ).to(self.device, dtype=self.dtype) self.register_modules(image_encoder=image_encoder) else: raise ValueError( "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict." ) else: logger.warning( "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter." "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead." ) # create feature extractor if it has not been registered to the pipeline yet if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None: feature_extractor = CLIPImageProcessor() self.register_modules(feature_extractor=feature_extractor) # load ip-adapter into unet unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) def set_ip_adapter_scale(self, scale): """ Sets the conditioning scale between text and image. 
Example: ```py pipeline.set_ip_adapter_scale(0.5) ``` """ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet for attn_processor in unet.attn_processors.values(): if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): if not isinstance(scale, list): scale = [scale] * len(attn_processor.scale) if len(attn_processor.scale) != len(scale): raise ValueError( f"`scale` should be a list of same length as the number if ip-adapters " f"Expected {len(attn_processor.scale)} but got {len(scale)}." ) attn_processor.scale = scale def unload_ip_adapter(self): """ Unloads the IP Adapter weights Examples: ```python >>> # Assuming `pipeline` is already loaded with the IP Adapter weights. >>> pipeline.unload_ip_adapter() >>> ... ``` """ # remove CLIP image encoder if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None: self.image_encoder = None self.register_to_config(image_encoder=[None, None]) # remove feature extractor only when safety_checker is None as safety_checker uses # the feature_extractor later if not hasattr(self, "safety_checker"): if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None: self.feature_extractor = None self.register_to_config(feature_extractor=[None, None]) # remove hidden encoder self.unet.encoder_hid_proj = None self.config.encoder_hid_dim_type = None # restore original Unet attention processors layers self.unet.set_default_attn_processor()
diffusers/src/diffusers/loaders/ip_adapter.py/0
{ "file_path": "diffusers/src/diffusers/loaders/ip_adapter.py", "repo_id": "diffusers", "token_count": 6444 }
115
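The `IPAdapterMixin` defined in `ip_adapter.py` above is mixed into the Stable Diffusion pipelines, so its methods are normally called on a loaded pipeline rather than used standalone. A minimal usage sketch; the checkpoint id, subfolder, and weight file name follow the published IP-Adapter releases and are illustrative:

```python
import torch
from diffusers import AutoPipelineForText2Image

# Any pipeline that inherits IPAdapterMixin exposes the loader methods below.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Downloads the adapter weights and, unless image_encoder_folder=None is passed,
# also registers the CLIP image encoder and a CLIPImageProcessor feature extractor.
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

# 0.0 ignores the image prompt, 1.0 relies on it fully; a list may be passed when
# several adapters are loaded, one scale per adapter.
pipeline.set_ip_adapter_scale(0.6)

# ... generate with ip_adapter_image=..., then restore the plain pipeline:
pipeline.unload_ip_adapter()
```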
from .autoencoder_asym_kl import AsymmetricAutoencoderKL from .autoencoder_kl import AutoencoderKL from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder from .autoencoder_tiny import AutoencoderTiny from .consistency_decoder_vae import ConsistencyDecoderVAE
diffusers/src/diffusers/models/autoencoders/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/__init__.py", "repo_id": "diffusers", "token_count": 99 }
116
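The `__init__.py` above only re-exports the autoencoder variants; in practice they are instantiated from pretrained checkpoints. A brief sketch (the repository ids are commonly used community checkpoints, named here for illustration only):

```python
import torch
from diffusers import AutoencoderKL, AutoencoderTiny

# Full KL autoencoder, e.g. the fine-tuned Stable Diffusion VAE.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

# Distilled "tiny" autoencoder, handy for fast preview decoding of SD latents.
tiny_vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=torch.float16)
```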
from dataclasses import dataclass from ..utils import BaseOutput @dataclass class AutoencoderKLOutput(BaseOutput): """ Output of AutoencoderKL encoding method. Args: latent_dist (`DiagonalGaussianDistribution`): Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`. `DiagonalGaussianDistribution` allows for sampling latents from the distribution. """ latent_dist: "DiagonalGaussianDistribution" # noqa: F821
diffusers/src/diffusers/models/modeling_outputs.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_outputs.py", "repo_id": "diffusers", "token_count": 178 }
117
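To make the output class above concrete: `AutoencoderKL.encode` returns an `AutoencoderKLOutput`, and the wrapped `DiagonalGaussianDistribution` is what gets sampled (or reduced to its mode) to obtain latents. A minimal sketch, assuming a standard 8x-downsampling VAE checkpoint:

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.eval()

image = torch.randn(1, 3, 256, 256)  # dummy image batch in [-1, 1], NCHW

with torch.no_grad():
    output = vae.encode(image)             # AutoencoderKLOutput
    dist = output.latent_dist              # DiagonalGaussianDistribution (mean + logvar)
    latents = dist.sample()                # stochastic; dist.mode() returns the mean instead
    latents = latents * vae.config.scaling_factor  # scaling convention used by SD pipelines

print(latents.shape)  # torch.Size([1, 4, 32, 32])
```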
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Dict, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput, logging from ..attention_processor import Attention, AttentionProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class Kandinsky3UNetOutput(BaseOutput): sample: torch.FloatTensor = None class Kandinsky3EncoderProj(nn.Module): def __init__(self, encoder_hid_dim, cross_attention_dim): super().__init__() self.projection_linear = nn.Linear(encoder_hid_dim, cross_attention_dim, bias=False) self.projection_norm = nn.LayerNorm(cross_attention_dim) def forward(self, x): x = self.projection_linear(x) x = self.projection_norm(x) return x class Kandinsky3UNet(ModelMixin, ConfigMixin): @register_to_config def __init__( self, in_channels: int = 4, time_embedding_dim: int = 1536, groups: int = 32, attention_head_dim: int = 64, layers_per_block: Union[int, Tuple[int]] = 3, block_out_channels: Tuple[int] = (384, 768, 1536, 3072), cross_attention_dim: Union[int, Tuple[int]] = 4096, encoder_hid_dim: int = 4096, ): super().__init__() # TOOD(Yiyi): Give better name and put into config for the following 4 parameters expansion_ratio = 4 compression_ratio = 2 add_cross_attention = (False, True, True, True) add_self_attention = (False, True, True, True) out_channels = in_channels init_channels = block_out_channels[0] // 2 self.time_proj = Timesteps(init_channels, flip_sin_to_cos=False, downscale_freq_shift=1) self.time_embedding = TimestepEmbedding( init_channels, time_embedding_dim, ) self.add_time_condition = Kandinsky3AttentionPooling( time_embedding_dim, cross_attention_dim, attention_head_dim ) self.conv_in = nn.Conv2d(in_channels, init_channels, kernel_size=3, padding=1) self.encoder_hid_proj = Kandinsky3EncoderProj(encoder_hid_dim, cross_attention_dim) hidden_dims = [init_channels] + list(block_out_channels) in_out_dims = list(zip(hidden_dims[:-1], hidden_dims[1:])) text_dims = [cross_attention_dim if is_exist else None for is_exist in add_cross_attention] num_blocks = len(block_out_channels) * [layers_per_block] layer_params = [num_blocks, text_dims, add_self_attention] rev_layer_params = map(reversed, layer_params) cat_dims = [] self.num_levels = len(in_out_dims) self.down_blocks = nn.ModuleList([]) for level, ((in_dim, out_dim), res_block_num, text_dim, self_attention) in enumerate( zip(in_out_dims, *layer_params) ): down_sample = level != (self.num_levels - 1) cat_dims.append(out_dim if level != (self.num_levels - 1) else 0) self.down_blocks.append( Kandinsky3DownSampleBlock( in_dim, out_dim, time_embedding_dim, text_dim, res_block_num, groups, attention_head_dim, expansion_ratio, compression_ratio, down_sample, self_attention, ) ) 
self.up_blocks = nn.ModuleList([]) for level, ((out_dim, in_dim), res_block_num, text_dim, self_attention) in enumerate( zip(reversed(in_out_dims), *rev_layer_params) ): up_sample = level != 0 self.up_blocks.append( Kandinsky3UpSampleBlock( in_dim, cat_dims.pop(), out_dim, time_embedding_dim, text_dim, res_block_num, groups, attention_head_dim, expansion_ratio, compression_ratio, up_sample, self_attention, ) ) self.conv_norm_out = nn.GroupNorm(groups, init_channels) self.conv_act_out = nn.SiLU() self.conv_out = nn.Conv2d(init_channels, out_channels, kernel_size=3, padding=1) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. 
""" self.set_attn_processor(AttnProcessor()) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value def forward(self, sample, timestep, encoder_hidden_states=None, encoder_attention_mask=None, return_dict=True): if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if not torch.is_tensor(timestep): dtype = torch.float32 if isinstance(timestep, float) else torch.int32 timestep = torch.tensor([timestep], dtype=dtype, device=sample.device) elif len(timestep.shape) == 0: timestep = timestep[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = timestep.expand(sample.shape[0]) time_embed_input = self.time_proj(timestep).to(sample.dtype) time_embed = self.time_embedding(time_embed_input) encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) if encoder_hidden_states is not None: time_embed = self.add_time_condition(time_embed, encoder_hidden_states, encoder_attention_mask) hidden_states = [] sample = self.conv_in(sample) for level, down_sample in enumerate(self.down_blocks): sample = down_sample(sample, time_embed, encoder_hidden_states, encoder_attention_mask) if level != self.num_levels - 1: hidden_states.append(sample) for level, up_sample in enumerate(self.up_blocks): if level != 0: sample = torch.cat([sample, hidden_states.pop()], dim=1) sample = up_sample(sample, time_embed, encoder_hidden_states, encoder_attention_mask) sample = self.conv_norm_out(sample) sample = self.conv_act_out(sample) sample = self.conv_out(sample) if not return_dict: return (sample,) return Kandinsky3UNetOutput(sample=sample) class Kandinsky3UpSampleBlock(nn.Module): def __init__( self, in_channels, cat_dim, out_channels, time_embed_dim, context_dim=None, num_blocks=3, groups=32, head_dim=64, expansion_ratio=4, compression_ratio=2, up_sample=True, self_attention=True, ): super().__init__() up_resolutions = [[None, True if up_sample else None, None, None]] + [[None] * 4] * (num_blocks - 1) hidden_channels = ( [(in_channels + cat_dim, in_channels)] + [(in_channels, in_channels)] * (num_blocks - 2) + [(in_channels, out_channels)] ) attentions = [] resnets_in = [] resnets_out = [] self.self_attention = self_attention self.context_dim = context_dim if self_attention: attentions.append( Kandinsky3AttentionBlock(out_channels, time_embed_dim, None, groups, head_dim, expansion_ratio) ) else: attentions.append(nn.Identity()) for (in_channel, out_channel), up_resolution in zip(hidden_channels, up_resolutions): resnets_in.append( Kandinsky3ResNetBlock(in_channel, in_channel, time_embed_dim, groups, compression_ratio, up_resolution) ) if context_dim is not None: attentions.append( Kandinsky3AttentionBlock( in_channel, time_embed_dim, context_dim, groups, head_dim, expansion_ratio ) ) else: attentions.append(nn.Identity()) resnets_out.append( Kandinsky3ResNetBlock(in_channel, out_channel, time_embed_dim, groups, compression_ratio) ) self.attentions = nn.ModuleList(attentions) self.resnets_in = nn.ModuleList(resnets_in) self.resnets_out = nn.ModuleList(resnets_out) def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): for attention, resnet_in, resnet_out in zip(self.attentions[1:], self.resnets_in, self.resnets_out): x = resnet_in(x, time_embed) if self.context_dim is not None: x = attention(x, time_embed, context, 
context_mask, image_mask) x = resnet_out(x, time_embed) if self.self_attention: x = self.attentions[0](x, time_embed, image_mask=image_mask) return x class Kandinsky3DownSampleBlock(nn.Module): def __init__( self, in_channels, out_channels, time_embed_dim, context_dim=None, num_blocks=3, groups=32, head_dim=64, expansion_ratio=4, compression_ratio=2, down_sample=True, self_attention=True, ): super().__init__() attentions = [] resnets_in = [] resnets_out = [] self.self_attention = self_attention self.context_dim = context_dim if self_attention: attentions.append( Kandinsky3AttentionBlock(in_channels, time_embed_dim, None, groups, head_dim, expansion_ratio) ) else: attentions.append(nn.Identity()) up_resolutions = [[None] * 4] * (num_blocks - 1) + [[None, None, False if down_sample else None, None]] hidden_channels = [(in_channels, out_channels)] + [(out_channels, out_channels)] * (num_blocks - 1) for (in_channel, out_channel), up_resolution in zip(hidden_channels, up_resolutions): resnets_in.append( Kandinsky3ResNetBlock(in_channel, out_channel, time_embed_dim, groups, compression_ratio) ) if context_dim is not None: attentions.append( Kandinsky3AttentionBlock( out_channel, time_embed_dim, context_dim, groups, head_dim, expansion_ratio ) ) else: attentions.append(nn.Identity()) resnets_out.append( Kandinsky3ResNetBlock( out_channel, out_channel, time_embed_dim, groups, compression_ratio, up_resolution ) ) self.attentions = nn.ModuleList(attentions) self.resnets_in = nn.ModuleList(resnets_in) self.resnets_out = nn.ModuleList(resnets_out) def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): if self.self_attention: x = self.attentions[0](x, time_embed, image_mask=image_mask) for attention, resnet_in, resnet_out in zip(self.attentions[1:], self.resnets_in, self.resnets_out): x = resnet_in(x, time_embed) if self.context_dim is not None: x = attention(x, time_embed, context, context_mask, image_mask) x = resnet_out(x, time_embed) return x class Kandinsky3ConditionalGroupNorm(nn.Module): def __init__(self, groups, normalized_shape, context_dim): super().__init__() self.norm = nn.GroupNorm(groups, normalized_shape, affine=False) self.context_mlp = nn.Sequential(nn.SiLU(), nn.Linear(context_dim, 2 * normalized_shape)) self.context_mlp[1].weight.data.zero_() self.context_mlp[1].bias.data.zero_() def forward(self, x, context): context = self.context_mlp(context) for _ in range(len(x.shape[2:])): context = context.unsqueeze(-1) scale, shift = context.chunk(2, dim=1) x = self.norm(x) * (scale + 1.0) + shift return x class Kandinsky3Block(nn.Module): def __init__(self, in_channels, out_channels, time_embed_dim, kernel_size=3, norm_groups=32, up_resolution=None): super().__init__() self.group_norm = Kandinsky3ConditionalGroupNorm(norm_groups, in_channels, time_embed_dim) self.activation = nn.SiLU() if up_resolution is not None and up_resolution: self.up_sample = nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2) else: self.up_sample = nn.Identity() padding = int(kernel_size > 1) self.projection = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding) if up_resolution is not None and not up_resolution: self.down_sample = nn.Conv2d(out_channels, out_channels, kernel_size=2, stride=2) else: self.down_sample = nn.Identity() def forward(self, x, time_embed): x = self.group_norm(x, time_embed) x = self.activation(x) x = self.up_sample(x) x = self.projection(x) x = self.down_sample(x) return x class Kandinsky3ResNetBlock(nn.Module): def 
__init__( self, in_channels, out_channels, time_embed_dim, norm_groups=32, compression_ratio=2, up_resolutions=4 * [None] ): super().__init__() kernel_sizes = [1, 3, 3, 1] hidden_channel = max(in_channels, out_channels) // compression_ratio hidden_channels = ( [(in_channels, hidden_channel)] + [(hidden_channel, hidden_channel)] * 2 + [(hidden_channel, out_channels)] ) self.resnet_blocks = nn.ModuleList( [ Kandinsky3Block(in_channel, out_channel, time_embed_dim, kernel_size, norm_groups, up_resolution) for (in_channel, out_channel), kernel_size, up_resolution in zip( hidden_channels, kernel_sizes, up_resolutions ) ] ) self.shortcut_up_sample = ( nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2) if True in up_resolutions else nn.Identity() ) self.shortcut_projection = ( nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else nn.Identity() ) self.shortcut_down_sample = ( nn.Conv2d(out_channels, out_channels, kernel_size=2, stride=2) if False in up_resolutions else nn.Identity() ) def forward(self, x, time_embed): out = x for resnet_block in self.resnet_blocks: out = resnet_block(out, time_embed) x = self.shortcut_up_sample(x) x = self.shortcut_projection(x) x = self.shortcut_down_sample(x) x = x + out return x class Kandinsky3AttentionPooling(nn.Module): def __init__(self, num_channels, context_dim, head_dim=64): super().__init__() self.attention = Attention( context_dim, context_dim, dim_head=head_dim, out_dim=num_channels, out_bias=False, ) def forward(self, x, context, context_mask=None): context_mask = context_mask.to(dtype=context.dtype) context = self.attention(context.mean(dim=1, keepdim=True), context, context_mask) return x + context.squeeze(1) class Kandinsky3AttentionBlock(nn.Module): def __init__(self, num_channels, time_embed_dim, context_dim=None, norm_groups=32, head_dim=64, expansion_ratio=4): super().__init__() self.in_norm = Kandinsky3ConditionalGroupNorm(norm_groups, num_channels, time_embed_dim) self.attention = Attention( num_channels, context_dim or num_channels, dim_head=head_dim, out_dim=num_channels, out_bias=False, ) hidden_channels = expansion_ratio * num_channels self.out_norm = Kandinsky3ConditionalGroupNorm(norm_groups, num_channels, time_embed_dim) self.feed_forward = nn.Sequential( nn.Conv2d(num_channels, hidden_channels, kernel_size=1, bias=False), nn.SiLU(), nn.Conv2d(hidden_channels, num_channels, kernel_size=1, bias=False), ) def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): height, width = x.shape[-2:] out = self.in_norm(x, time_embed) out = out.reshape(x.shape[0], -1, height * width).permute(0, 2, 1) context = context if context is not None else out if context_mask is not None: context_mask = context_mask.to(dtype=context.dtype) out = self.attention(out, context, context_mask) out = out.permute(0, 2, 1).unsqueeze(-1).reshape(out.shape[0], -1, height, width) x = x + out out = self.out_norm(x, time_embed) out = self.feed_forward(out) x = x + out return x
diffusers/src/diffusers/models/unets/unet_kandinsky3.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_kandinsky3.py", "repo_id": "diffusers", "token_count": 9647 }
118
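For orientation, the `Kandinsky3UNet` above is the latent denoiser used by the Kandinsky 3 pipelines and is normally loaded from the published checkpoint rather than built from scratch. A rough sketch of loading it and running one denoising step on dummy tensors; the repository id, `unet` subfolder, fp16 variant, and tensor sizes are assumptions based on the published checkpoint layout, and the model is large, so treat this as illustrative rather than something to run casually:

```python
import torch
from diffusers import Kandinsky3UNet

unet = Kandinsky3UNet.from_pretrained(
    "kandinsky-community/kandinsky-3", subfolder="unet",
    variant="fp16", torch_dtype=torch.float16,
).to("cuda")

# Dummy inputs: 4-channel latents plus T5 text states (encoder_hid_dim defaults to 4096).
latents = torch.randn(1, 4, 64, 64, dtype=torch.float16, device="cuda")
text_states = torch.randn(1, 128, 4096, dtype=torch.float16, device="cuda")
text_mask = torch.ones(1, 128, dtype=torch.float16, device="cuda")

with torch.no_grad():
    out = unet(latents, timestep=999,
               encoder_hidden_states=text_states, encoder_attention_mask=text_mask)

print(out.sample.shape)  # spatial size is preserved: torch.Size([1, 4, 64, 64])
```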
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["multicontrolnet"] = ["MultiControlNetModel"] _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/controlnet/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/__init__.py", "repo_id": "diffusers", "token_count": 1294 }
119
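A note on the pattern above, which recurs across the pipeline packages: the module replaces itself with a `_LazyModule`, so importing the package is cheap and the heavy pipeline files are only imported on first attribute access, while missing optional dependencies are papered over with dummy objects that raise an informative error when used. A small sketch of what that means in user code:

```python
# Importing the package only registers the _import_structure mapping; the heavy
# pipeline_controlnet module is not imported yet.
import diffusers.pipelines.controlnet as controlnet_pkg

# First attribute access triggers the real submodule import. If transformers or torch
# were missing, this name would instead resolve to a dummy object that raises on use.
pipeline_cls = controlnet_pkg.StableDiffusionControlNetPipeline
print(pipeline_cls.__name__)  # "StableDiffusionControlNetPipeline"
```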
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = { "timesteps": [ "fast27_timesteps", "smart100_timesteps", "smart185_timesteps", "smart27_timesteps", "smart50_timesteps", "super100_timesteps", "super27_timesteps", "super40_timesteps", ] } try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_if"] = ["IFPipeline"] _import_structure["pipeline_if_img2img"] = ["IFImg2ImgPipeline"] _import_structure["pipeline_if_img2img_superresolution"] = ["IFImg2ImgSuperResolutionPipeline"] _import_structure["pipeline_if_inpainting"] = ["IFInpaintingPipeline"] _import_structure["pipeline_if_inpainting_superresolution"] = ["IFInpaintingSuperResolutionPipeline"] _import_structure["pipeline_if_superresolution"] = ["IFSuperResolutionPipeline"] _import_structure["pipeline_output"] = ["IFPipelineOutput"] _import_structure["safety_checker"] = ["IFSafetyChecker"] _import_structure["watermark"] = ["IFWatermarker"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_if import IFPipeline from .pipeline_if_img2img import IFImg2ImgPipeline from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .timesteps import ( fast27_timesteps, smart27_timesteps, smart50_timesteps, smart100_timesteps, smart185_timesteps, super27_timesteps, super40_timesteps, super100_timesteps, ) from .watermark import IFWatermarker else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deepfloyd_if/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deepfloyd_if/__init__.py", "repo_id": "diffusers", "token_count": 1266 }
120
# Copyright 2022 The Music Spectrogram Diffusion Authors. # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): @register_to_config def __init__( self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ): super().__init__() self.token_embedder = nn.Embedding(vocab_size, d_model) self.position_encoding = nn.Embedding(max_length, d_model) self.position_encoding.weight.requires_grad = False self.dropout_pre = nn.Dropout(p=dropout_rate) t5config = T5Config( vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, ) self.encoders = nn.ModuleList() for lyr_num in range(num_layers): lyr = T5Block(t5config) self.encoders.append(lyr) self.layer_norm = T5LayerNorm(d_model) self.dropout_post = nn.Dropout(p=dropout_rate) def forward(self, encoder_input_tokens, encoder_inputs_mask): x = self.token_embedder(encoder_input_tokens) seq_length = encoder_input_tokens.shape[1] inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device) x += self.position_encoding(inputs_positions) x = self.dropout_pre(x) # inverted the attention mask input_shape = encoder_input_tokens.size() extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) for lyr in self.encoders: x = lyr(x, extended_attention_mask)[0] x = self.layer_norm(x) return self.dropout_post(x), encoder_inputs_mask
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py", "repo_id": "diffusers", "token_count": 1254 }
121
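A small smoke test for the notes encoder above, using deliberately tiny hyperparameters; all values are arbitrary and far smaller than the real spectrogram-diffusion checkpoint, and the import path simply mirrors the file path recorded above:

```python
import torch
from diffusers.pipelines.deprecated.spectrogram_diffusion.notes_encoder import (
    SpectrogramNotesEncoder,
)

encoder = SpectrogramNotesEncoder(
    max_length=32, vocab_size=100, d_model=64, dropout_rate=0.1,
    num_layers=2, num_heads=4, d_kv=16, d_ff=128,
    feed_forward_proj="gated-gelu",
)
encoder.eval()

tokens = torch.randint(0, 100, (2, 32))     # (batch, sequence) of note token ids
mask = torch.ones(2, 32, dtype=torch.long)  # 1 = real token, 0 = padding

with torch.no_grad():
    hidden, out_mask = encoder(tokens, mask)

print(hidden.shape)  # torch.Size([2, 32, 64])
```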
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import torch import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Versatile Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqvae ([`VQModel`]): Vector-quantized (VQ) model to encode and decode images to and from latent representations. bert ([`LDMBertModel`]): Text-encoder model based on [`~transformers.BERT`]. tokenizer ([`~transformers.BertTokenizer`]): A `BertTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" model_cpu_offload_seq = "bert->unet->vqvae" tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModelWithProjection image_unet: UNet2DConditionModel text_unet: UNetFlatConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers _optional_components = ["text_unet"] def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, image_unet: UNet2DConditionModel, text_unet: UNetFlatConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if self.text_unet is not None: self._swap_unet_attention_blocks() def _swap_unet_attention_blocks(self): """ Swap the `Transformer2DModel` blocks between the image and text UNets """ for name, module in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): parent_name, index = name.rsplit(".", 1) index = int(index) self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = ( self.text_unet.get_submodule(parent_name)[index], self.image_unet.get_submodule(parent_name)[index], ) def remove_unused_weights(self): self.register_modules(text_unet=None) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" def normalize_embeddings(encoder_output): embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) embeds_pooled = encoder_output.text_embeds embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) return embeds batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = normalize_embeddings(prompt_embeds) # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionTextToImagePipeline >>> import torch >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe.remove_unused_weights() >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0] >>> image.save("./astronaut.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py", "repo_id": "diffusers", "token_count": 9810 }
122
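One detail worth isolating from the denoising loop above: classifier-free guidance runs the UNet on a single batch that stacks the unconditional and text-conditioned inputs, then linearly recombines the two halves of the prediction. A standalone sketch of that recombination step:

```python
import torch

def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The pipeline feeds torch.cat([uncond, cond]) through the UNet, so the prediction
    # splits into an unconditional half and a text-conditioned half along the batch dim.
    noise_uncond, noise_text = noise_pred.chunk(2)
    # Linear extrapolation away from the unconditional prediction: scale 1.0 returns the
    # text-conditioned prediction unchanged, larger values push harder toward the prompt.
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

# Toy shapes only: stacked batch of 2, 4 latent channels, 8x8 latents.
dummy_pred = torch.randn(2, 4, 8, 8)
print(apply_classifier_free_guidance(dummy_pred, guidance_scale=7.5).shape)  # (1, 4, 8, 8)
```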
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput from transformers.utils import logging from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class LDMTextToImagePipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using latent diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqvae ([`VQModel`]): Vector-quantized (VQ) model to encode and decode images to and from latent representations. bert ([`LDMBertModel`]): Text-encoder model based on [`~transformers.BERT`]. tokenizer ([`~transformers.BertTokenizer`]): A `BertTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ model_cpu_offload_seq = "bert->unet->vqvae" def __init__( self, vqvae: Union[VQModel, AutoencoderKL], bert: PreTrainedModel, tokenizer: PreTrainedTokenizer, unet: Union[UNet2DModel, UNet2DConditionModel], scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], ): super().__init__() self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 1.0, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]: r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 1.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. Example: ```py >>> from diffusers import DiffusionPipeline >>> # load model and scheduler >>> ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") >>> # run pipeline in inference (sample random noise and denoise) >>> prompt = "A painting of a squirrel eating a burger" >>> images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images >>> # save images >>> for idx, image in enumerate(images): ... image.save(f"squirrel-{idx}.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # get unconditional embeddings for classifier free guidance if guidance_scale != 1.0: uncond_input = self.tokenizer( [""] * batch_size, padding="max_length", max_length=77, truncation=True, return_tensors="pt" ) negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0] # get prompt text embeddings text_input = self.tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt") prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0] # get the initial random noise unless the user supplied it latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor( latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self._execution_device) self.scheduler.set_timesteps(num_inference_steps) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_kwargs = {} if accepts_eta: extra_kwargs["eta"] = eta for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale == 1.0: # guidance_scale of 1 means no guidance latents_input = latents context = prompt_embeds else: # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = torch.cat([latents] * 2) context = torch.cat([negative_prompt_embeds, prompt_embeds]) # predict the noise residual noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample # perform guidance if guidance_scale != 1.0: noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample # scale and decode the image latents with vae latents = 1 / self.vqvae.config.scaling_factor * latents image = self.vqvae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) ################################################################################ # Code for the text transformer model ################################################################################ """ PyTorch LDMBERT model.""" logger = logging.get_logger(__name__) LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "ldm-bert", # See all LDMBert models at https://huggingface.co/models?filter=ldmbert ] LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json", } """ LDMBERT model configuration""" class LDMBertConfig(PretrainedConfig): model_type = "ldmbert" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=30522, max_position_embeddings=77, encoder_layers=32, encoder_ffn_dim=5120, encoder_attention_heads=8, head_dim=64, encoder_layerdrop=0.0, activation_function="gelu", d_model=1280, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.head_dim = head_dim self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.classifier_dropout = classifier_dropout 
self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__(pad_token_id=pad_token_id, **kwargs) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert class LDMBertAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, head_dim: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = False, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = head_dim self.inner_dim = head_dim * num_heads self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.out_proj = nn.Linear(self.inner_dim, embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
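        # At this point `attn_output` is (bsz, tgt_len, num_heads, head_dim); the reshape below merges the per-head
        # dimensions into `inner_dim` (= num_heads * head_dim) before `out_proj` maps it back to `embed_dim`.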
attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class LDMBertEncoderLayer(nn.Module): def __init__(self, config: LDMBertConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = LDMBertAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, head_dim=config.head_dim, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert class LDMBertPreTrainedModel(PreTrainedModel): config_class = LDMBertConfig base_model_prefix = "model" _supports_gradient_checkpointing = True _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LDMBertEncoder,)): 
module.gradient_checkpointing = value @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs class LDMBertEncoder(LDMBertPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`LDMBertEncoderLayer`]. Args: config: LDMBertConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: LDMBertConfig): super().__init__(config) self.dropout = config.dropout embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) seq_len = input_shape[1] if position_ids is None: position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class LDMBertModel(LDMBertPreTrainedModel): _no_split_modules = [] def __init__(self, config: LDMBertConfig): super().__init__(config) self.model = LDMBertEncoder(config) self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs
diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py/0
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import fnmatch import importlib import inspect import os import re import sys from dataclasses import dataclass from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import requests import torch from huggingface_hub import ( ModelCard, create_repo, hf_hub_download, model_info, snapshot_download, ) from huggingface_hub.utils import OfflineModeIsEnabled, validate_hf_hub_args from packaging import version from requests.exceptions import HTTPError from tqdm.auto import tqdm from .. import __version__ from ..configuration_utils import ConfigMixin from ..models import AutoencoderKL from ..models.attention_processor import FusedAttnProcessor2_0 from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from ..utils import ( CONFIG_NAME, DEPRECATED_REVISION_ARGS, BaseOutput, PushToHubMixin, deprecate, is_accelerate_available, is_accelerate_version, is_torch_npu_available, is_torch_version, logging, numpy_to_pil, ) from ..utils.hub_utils import load_or_create_model_card, populate_model_card from ..utils.torch_utils import is_compiled_module if is_torch_npu_available(): import torch_npu # noqa: F401 from .pipeline_loading_utils import ( ALL_IMPORTABLE_CLASSES, CONNECTED_PIPES_KEYS, CUSTOM_PIPELINE_FILE_NAME, LOADABLE_CLASSES, _fetch_class_library_tuple, _get_pipeline_class, _unwrap_model, is_safetensors_compatible, load_sub_model, maybe_raise_or_warn, variant_compatible_siblings, warn_deprecated_model_variant, ) if is_accelerate_available(): import accelerate LIBRARIES = [] for library in LOADABLE_CLASSES: LIBRARIES.append(library) logger = logging.get_logger(__name__) @dataclass class ImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. """ images: Union[List[PIL.Image.Image], np.ndarray] @dataclass class AudioPipelineOutput(BaseOutput): """ Output class for audio pipelines. Args: audios (`np.ndarray`) List of denoised audio samples of a NumPy array of shape `(batch_size, num_channels, sample_rate)`. """ audios: np.ndarray class DiffusionPipeline(ConfigMixin, PushToHubMixin): r""" Base class for all pipelines. [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and provides methods for loading, downloading and saving models. It also includes methods to: - move all PyTorch modules to the device of your choice - enable/disable the progress bar for the denoising iteration Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. 
- **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the pipeline to function (should be overridden by subclasses). """ config_name = "model_index.json" model_cpu_offload_seq = None _optional_components = [] _exclude_from_cpu_offload = [] _load_connected_pipes = False _is_onnx = False def register_modules(self, **kwargs): for name, module in kwargs.items(): # retrieve library if module is None or isinstance(module, (tuple, list)) and module[0] is None: register_dict = {name: (None, None)} else: library, class_name = _fetch_class_library_tuple(module) register_dict = {name: (library, class_name)} # save model index config self.register_to_config(**register_dict) # set models setattr(self, name, module) def __setattr__(self, name: str, value: Any): if name in self.__dict__ and hasattr(self.config, name): # We need to overwrite the config if name exists in config if isinstance(getattr(self.config, name), (tuple, list)): if value is not None and self.config[name][0] is not None: class_library_tuple = _fetch_class_library_tuple(value) else: class_library_tuple = (None, None) self.register_to_config(**{name: class_library_tuple}) else: self.register_to_config(**{name: value}) super().__setattr__(name, value) def save_pretrained( self, save_directory: Union[str, os.PathLike], safe_serialization: bool = True, variant: Optional[str] = None, push_to_hub: bool = False, **kwargs, ): """ Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its class implements both a save and loading method. The pipeline is easily reloaded using the [`~DiffusionPipeline.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to save a pipeline to. Will be created if it doesn't exist. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
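        Example:

        A minimal sketch (assumes a checkpoint such as `CompVis/ldm-text2im-large-256` can be loaded with
        [`~DiffusionPipeline.from_pretrained`]):

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        >>> # Writes each registered component to its own subfolder plus the model_index.json config.
        >>> pipeline.save_pretrained("./ldm-text2im-large-256")
        ```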
""" model_index_dict = dict(self.config) model_index_dict.pop("_class_name", None) model_index_dict.pop("_diffusers_version", None) model_index_dict.pop("_module", None) model_index_dict.pop("_name_or_path", None) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", False) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id expected_modules, optional_kwargs = self._get_signature_keys(self) def is_saveable_module(name, value): if name not in expected_modules: return False if name in self._optional_components and value[0] is None: return False return True model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)} for pipeline_component_name in model_index_dict.keys(): sub_model = getattr(self, pipeline_component_name) model_cls = sub_model.__class__ # Dynamo wraps the original model in a private class. # I didn't find a public API to get the original class. if is_compiled_module(sub_model): sub_model = _unwrap_model(sub_model) model_cls = sub_model.__class__ save_method_name = None # search for the model's base class in LOADABLE_CLASSES for library_name, library_classes in LOADABLE_CLASSES.items(): if library_name in sys.modules: library = importlib.import_module(library_name) else: logger.info( f"{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}" ) for base_class, save_load_methods in library_classes.items(): class_candidate = getattr(library, base_class, None) if class_candidate is not None and issubclass(model_cls, class_candidate): # if we found a suitable base class in LOADABLE_CLASSES then grab its save method save_method_name = save_load_methods[0] break if save_method_name is not None: break if save_method_name is None: logger.warning( f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved." ) # make sure that unsaveable components are not tried to be loaded afterward self.register_to_config(**{pipeline_component_name: (None, None)}) continue save_method = getattr(sub_model, save_method_name) # Call the save method with the argument safe_serialization only if it's supported save_method_signature = inspect.signature(save_method) save_method_accept_safe = "safe_serialization" in save_method_signature.parameters save_method_accept_variant = "variant" in save_method_signature.parameters save_kwargs = {} if save_method_accept_safe: save_kwargs["safe_serialization"] = safe_serialization if save_method_accept_variant: save_kwargs["variant"] = variant save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs) # finally save the config self.save_config(save_directory) if push_to_hub: # Create a new empty model card and eventually tag it model_card = load_or_create_model_card(repo_id, token=token, is_pipeline=True) model_card = populate_model_card(model_card) model_card.save(os.path.join(save_directory, "README.md")) self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, ) def to(self, *args, **kwargs): r""" Performs Pipeline dtype and/or device conversion. A torch.dtype and torch.device are inferred from the arguments of `self.to(*args, **kwargs).` <Tip> If the pipeline already has the correct torch.dtype and torch.device, then it is returned as is. 
Otherwise, the returned pipeline is a copy of self with the desired torch.dtype and torch.device. </Tip> Here are the ways to call `to`: - `to(dtype, silence_dtype_warnings=False) โ†’ DiffusionPipeline` to return a pipeline with the specified [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) - `to(device, silence_dtype_warnings=False) โ†’ DiffusionPipeline` to return a pipeline with the specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) - `to(device=None, dtype=None, silence_dtype_warnings=False) โ†’ DiffusionPipeline` to return a pipeline with the specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) and [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) Arguments: dtype (`torch.dtype`, *optional*): Returns a pipeline with the specified [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) device (`torch.Device`, *optional*): Returns a pipeline with the specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) silence_dtype_warnings (`str`, *optional*, defaults to `False`): Whether to omit warnings if the target `dtype` is not compatible with the target `device`. Returns: [`DiffusionPipeline`]: The pipeline converted to specified `dtype` and/or `dtype`. """ dtype = kwargs.pop("dtype", None) device = kwargs.pop("device", None) silence_dtype_warnings = kwargs.pop("silence_dtype_warnings", False) dtype_arg = None device_arg = None if len(args) == 1: if isinstance(args[0], torch.dtype): dtype_arg = args[0] else: device_arg = torch.device(args[0]) if args[0] is not None else None elif len(args) == 2: if isinstance(args[0], torch.dtype): raise ValueError( "When passing two arguments, make sure the first corresponds to `device` and the second to `dtype`." ) device_arg = torch.device(args[0]) if args[0] is not None else None dtype_arg = args[1] elif len(args) > 2: raise ValueError("Please make sure to pass at most two arguments (`device` and `dtype`) `.to(...)`") if dtype is not None and dtype_arg is not None: raise ValueError( "You have passed `dtype` both as an argument and as a keyword argument. Please only pass one of the two." ) dtype = dtype or dtype_arg if device is not None and device_arg is not None: raise ValueError( "You have passed `device` both as an argument and as a keyword argument. Please only pass one of the two." ) device = device or device_arg # throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU. 
def module_is_sequentially_offloaded(module): if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"): return False return hasattr(module, "_hf_hook") and not isinstance( module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook) ) def module_is_offloaded(module): if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"): return False return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload) # .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer pipeline_is_sequentially_offloaded = any( module_is_sequentially_offloaded(module) for _, module in self.components.items() ) if pipeline_is_sequentially_offloaded and device and torch.device(device).type == "cuda": raise ValueError( "It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading." ) # Display a warning in this case (the operation succeeds but the benefits are lost) pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items()) if pipeline_is_offloaded and device and torch.device(device).type == "cuda": logger.warning( f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading." ) module_names, _ = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded for module in modules: is_loaded_in_8bit = hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit if is_loaded_in_8bit and dtype is not None: logger.warning( f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {dtype} is not yet supported. Module is still in 8bit precision." ) if is_loaded_in_8bit and device is not None: logger.warning( f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {dtype} via `.to()` is not yet supported. Module is still on {module.device}." ) else: module.to(device, dtype) if ( module.dtype == torch.float16 and str(device) in ["cpu"] and not silence_dtype_warnings and not is_offloaded ): logger.warning( "Pipelines loaded with `dtype=torch.float16` cannot run with `cpu` device. It" " is not recommended to move them to `cpu` as running them will fail. Please make" " sure to use an accelerator to run the pipeline in inference, due to the lack of" " support for`float16` operations on this device in PyTorch. Please, remove the" " `torch_dtype=torch.float16` argument, or use another device for inference." ) return self @property def device(self) -> torch.device: r""" Returns: `torch.device`: The torch device on which the pipeline is located. 
""" module_names, _ = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] for module in modules: return module.device return torch.device("cpu") @property def dtype(self) -> torch.dtype: r""" Returns: `torch.dtype`: The torch dtype on which the pipeline is located. """ module_names, _ = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] for module in modules: return module.dtype return torch.float32 @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights. The pipeline is set in evaluation mode (`model.eval()`) by default. If you get the error message below, you need to finetune the weights for your downstream task: ``` Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. custom_pipeline (`str`, *optional*): <Tip warning={true}> ๐Ÿงช This is an experimental feature and may change in the future. </Tip> Can be either: - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines the custom pipeline. - A string, the *file name* of a community pipeline hosted on GitHub under [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file names must match the file name and not the pipeline script (`clip_guided_stable_diffusion` instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the current main branch of GitHub. - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. The directory must contain a file called `pipeline.py` that defines the custom pipeline. For more information on how to load and create custom pipelines, please have a look at [Loading and Adding Custom Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview) force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. Defaults to the latest stable ๐Ÿค— Diffusers version. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if youโ€™re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesnโ€™t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have ๐Ÿค— Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. 
use_onnx (`bool`, *optional*, defaults to `None`): If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending with `.onnx` and `.pb`. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `huggingface-cli login`. </Tip> Examples: ```py >>> from diffusers import DiffusionPipeline >>> # Download pipeline from huggingface.co and cache. >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") >>> # Download pipeline that requires an authorization token >>> # For more information on access tokens, please refer to this section >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens) >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # Use a different scheduler >>> from diffusers import LMSDiscreteScheduler >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) >>> pipeline.scheduler = scheduler ``` """ cache_dir = kwargs.pop("cache_dir", None) resume_download = kwargs.pop("resume_download", False) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) from_flax = kwargs.pop("from_flax", False) torch_dtype = kwargs.pop("torch_dtype", None) custom_pipeline = kwargs.pop("custom_pipeline", None) custom_revision = kwargs.pop("custom_revision", None) provider = kwargs.pop("provider", None) sess_options = kwargs.pop("sess_options", None) device_map = kwargs.pop("device_map", None) max_memory = kwargs.pop("max_memory", None) offload_folder = kwargs.pop("offload_folder", None) offload_state_dict = kwargs.pop("offload_state_dict", False) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) use_onnx = kwargs.pop("use_onnx", None) load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) if low_cpu_mem_usage and not is_accelerate_available(): low_cpu_mem_usage = False logger.warning( "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" " install accelerate\n```\n." ) if device_map is not None and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" " `device_map=None`." ) if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): raise NotImplementedError( "Low memory initialization requires torch >= 1.9.0. 
Please either update your PyTorch version or set" " `low_cpu_mem_usage=False`." ) if low_cpu_mem_usage is False and device_map is not None: raise ValueError( f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and" " dispatching. Please make sure to set `low_cpu_mem_usage=True`." ) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained if not os.path.isdir(pretrained_model_name_or_path): if pretrained_model_name_or_path.count("/") > 1: raise ValueError( f'The provided pretrained_model_name_or_path "{pretrained_model_name_or_path}"' " is neither a valid local path nor a valid repo id. Please check the parameter." ) cached_folder = cls.download( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, from_flax=from_flax, use_safetensors=use_safetensors, use_onnx=use_onnx, custom_pipeline=custom_pipeline, custom_revision=custom_revision, variant=variant, load_connected_pipeline=load_connected_pipeline, **kwargs, ) else: cached_folder = pretrained_model_name_or_path config_dict = cls.load_config(cached_folder) # pop out "_ignore_files" as it is only needed for download config_dict.pop("_ignore_files", None) # 2. Define which model components should load variants # We retrieve the information by matching whether variant # model checkpoints exist in the subfolders model_variants = {} if variant is not None: for folder in os.listdir(cached_folder): folder_path = os.path.join(cached_folder, folder) is_folder = os.path.isdir(folder_path) and folder in config_dict variant_exists = is_folder and any( p.split(".")[1].startswith(variant) for p in os.listdir(folder_path) ) if variant_exists: model_variants[folder] = variant # 3. Load the pipeline class, if using custom module then load it from the hub # if we load from explicit class, let's use it custom_class_name = None if os.path.isfile(os.path.join(cached_folder, f"{custom_pipeline}.py")): custom_pipeline = os.path.join(cached_folder, f"{custom_pipeline}.py") elif isinstance(config_dict["_class_name"], (list, tuple)) and os.path.isfile( os.path.join(cached_folder, f"{config_dict['_class_name'][0]}.py") ): custom_pipeline = os.path.join(cached_folder, f"{config_dict['_class_name'][0]}.py") custom_class_name = config_dict["_class_name"][1] pipeline_class = _get_pipeline_class( cls, config_dict, load_connected_pipeline=load_connected_pipeline, custom_pipeline=custom_pipeline, class_name=custom_class_name, cache_dir=cache_dir, revision=custom_revision, ) # DEPRECATED: To be removed in 1.0.0 if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse( version.parse(config_dict["_diffusers_version"]).base_version ) <= version.parse("0.5.1"): from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy pipeline_class = StableDiffusionInpaintPipelineLegacy deprecation_message = ( "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the" f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. 
For" " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting" " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your" f" checkpoint {pretrained_model_name_or_path} to the format of" " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain" " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0." ) deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False) # 4. Define expected modules given pipeline signature # and define non-None initialized modules (=`init_kwargs`) # some modules can be passed directly to the init # in this case they are already instantiated in `kwargs` # extract them here expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) # define init kwargs and make sure that optional component modules are filtered out init_kwargs = { k: init_dict.pop(k) for k in optional_kwargs if k in init_dict and k not in pipeline_class._optional_components } init_kwargs = {**init_kwargs, **passed_pipe_kwargs} # remove `null` components def load_module(name, value): if value[0] is None: return False if name in passed_class_obj and passed_class_obj[name] is None: return False return True init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} # Special case: safety_checker must be loaded separately when using `from_flax` if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj: raise NotImplementedError( "The safety checker cannot be automatically loaded when loading weights `from_flax`." " Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker" " separately if you need it." ) # 5. Throw nice warnings / errors for fast accelerate loading if len(unused_kwargs) > 0: logger.warning( f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." ) # import it here to avoid circular import from diffusers import pipelines # 6. 
Load each module in the pipeline for name, (library_name, class_name) in logging.tqdm(init_dict.items(), desc="Loading pipeline components..."): # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names class_name = class_name[4:] if class_name.startswith("Flax") else class_name # 6.2 Define all importable classes is_pipeline_module = hasattr(pipelines, library_name) importable_classes = ALL_IMPORTABLE_CLASSES loaded_sub_model = None # 6.3 Use passed sub model or load class_name from library_name if name in passed_class_obj: # if the model is in a pipeline module, then we load it from the pipeline # check that passed_class_obj has correct parent class maybe_raise_or_warn( library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module ) loaded_sub_model = passed_class_obj[name] else: # load sub model loaded_sub_model = load_sub_model( library_name=library_name, class_name=class_name, importable_classes=importable_classes, pipelines=pipelines, is_pipeline_module=is_pipeline_module, pipeline_class=pipeline_class, torch_dtype=torch_dtype, provider=provider, sess_options=sess_options, device_map=device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, model_variants=model_variants, name=name, from_flax=from_flax, variant=variant, low_cpu_mem_usage=low_cpu_mem_usage, cached_folder=cached_folder, ) logger.info( f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}." ) init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")): modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} load_kwargs = { "cache_dir": cache_dir, "resume_download": resume_download, "force_download": force_download, "proxies": proxies, "local_files_only": local_files_only, "token": token, "revision": revision, "torch_dtype": torch_dtype, "custom_pipeline": custom_pipeline, "custom_revision": custom_revision, "provider": provider, "sess_options": sess_options, "device_map": device_map, "max_memory": max_memory, "offload_folder": offload_folder, "offload_state_dict": offload_state_dict, "low_cpu_mem_usage": low_cpu_mem_usage, "variant": variant, "use_safetensors": use_safetensors, } def get_connected_passed_kwargs(prefix): connected_passed_class_obj = { k.replace(f"{prefix}_", ""): w for k, w in passed_class_obj.items() if k.split("_")[0] == prefix } connected_passed_pipe_kwargs = { k.replace(f"{prefix}_", ""): w for k, w in passed_pipe_kwargs.items() if k.split("_")[0] == prefix } connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs} return connected_passed_kwargs connected_pipes = { prefix: DiffusionPipeline.from_pretrained( repo_id, **load_kwargs.copy(), **get_connected_passed_kwargs(prefix) ) for prefix, repo_id in connected_pipes.items() if repo_id is not None } for prefix, connected_pipe in connected_pipes.items(): # add connected pipes to `init_kwargs` with <prefix>_<component_name>, e.g. "prior_text_encoder" init_kwargs.update( {"_".join([prefix, name]): component for name, component in connected_pipe.components.items()} ) # 7. 
Potentially add passed objects if expected missing_modules = set(expected_modules) - set(init_kwargs.keys()) passed_modules = list(passed_class_obj.keys()) optional_modules = pipeline_class._optional_components if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): for module in missing_modules: init_kwargs[module] = passed_class_obj.get(module, None) elif len(missing_modules) > 0: passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs raise ValueError( f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." ) # 8. Instantiate the pipeline model = pipeline_class(**init_kwargs) # 9. Save where the model was instantiated from model.register_to_config(_name_or_path=pretrained_model_name_or_path) return model @property def name_or_path(self) -> str: return getattr(self.config, "_name_or_path", None) @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from Accelerate's module hooks. """ for name, model in self.components.items(): if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload: continue if not hasattr(model, "_hf_hook"): return self.device for module in model.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. Arguments: gpu_id (`int`, *optional*): The ID of the accelerator that shall be used in inference. If not specified, it will default to 0. device (`torch.Device` or `str`, *optional*, defaults to "cuda"): The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will default to "cuda". """ if self.model_cpu_offload_seq is None: raise ValueError( "Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set." ) if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") torch_device = torch.device(device) device_index = torch_device.index if gpu_id is not None and device_index is not None: raise ValueError( f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" f"Cannot pass both. 
Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" ) # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0 self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0) device_type = torch_device.type device = torch.device(f"{device_type}:{self._offload_gpu_id}") self._offload_device = device if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) device_mod = getattr(torch, self.device.type, None) if hasattr(device_mod, "empty_cache") and device_mod.is_available(): device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)} self._all_hooks = [] hook = None for model_str in self.model_cpu_offload_seq.split("->"): model = all_model_components.pop(model_str, None) if not isinstance(model, torch.nn.Module): continue _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook) self._all_hooks.append(hook) # CPU offload models that are not in the seq chain unless they are explicitly excluded # these models will stay on CPU until maybe_free_model_hooks is called # some models cannot be in the seq chain because they are iteratively called, such as controlnet for name, model in all_model_components.items(): if not isinstance(model, torch.nn.Module): continue if name in self._exclude_from_cpu_offload: model.to(device) else: _, hook = cpu_offload_with_hook(model, device) self._all_hooks.append(hook) def maybe_free_model_hooks(self): r""" Function that offloads all components, removes all model hooks that were added when using `enable_model_cpu_offload` and then applies them again. In case the model has not been offloaded this function is a no-op. Make sure to add this function to the end of the `__call__` function of your pipeline so that it functions correctly when applying enable_model_cpu_offload. """ if not hasattr(self, "_all_hooks") or len(self._all_hooks) == 0: # `enable_model_cpu_offload` has not be called, so silently do nothing return for hook in self._all_hooks: # offload model and remove hook from model hook.offload() hook.remove() # make sure the model is in the same state as before calling it self.enable_model_cpu_offload(device=getattr(self, "_offload_device", "cuda")) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using ๐Ÿค— Accelerate, significantly reducing memory usage. When called, the state dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called. Offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. Arguments: gpu_id (`int`, *optional*): The ID of the accelerator that shall be used in inference. If not specified, it will default to 0. device (`torch.Device` or `str`, *optional*, defaults to "cuda"): The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will default to "cuda". 
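        Example:

        A minimal usage sketch (the checkpoint id and prompt are only illustrative assumptions):

        ```py
        >>> import torch
        >>> from diffusers import DiffusionPipeline

        >>> pipe = DiffusionPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ... )
        >>> pipe.enable_sequential_cpu_offload()
        >>> # submodules are moved to the accelerator one by one during the forward passes below
        >>> image = pipe("a photo of an astronaut riding a horse on mars").images[0]
        ```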
""" if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"): from accelerate import cpu_offload else: raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher") torch_device = torch.device(device) device_index = torch_device.index if gpu_id is not None and device_index is not None: raise ValueError( f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" f"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" ) # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0 self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0) device_type = torch_device.type device = torch.device(f"{device_type}:{self._offload_gpu_id}") self._offload_device = device if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) device_mod = getattr(torch, self.device.type, None) if hasattr(device_mod, "empty_cache") and device_mod.is_available(): device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) for name, model in self.components.items(): if not isinstance(model, torch.nn.Module): continue if name in self._exclude_from_cpu_offload: model.to(device) else: # make sure to offload buffers if not all high level weights # are of type nn.Module offload_buffers = len(model._parameters) > 0 cpu_offload(model, device, offload_buffers=offload_buffers) @classmethod @validate_hf_hub_args def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: r""" Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights. Parameters: pretrained_model_name (`str` or `os.PathLike`, *optional*): A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. custom_pipeline (`str`, *optional*): Can be either: - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines the custom pipeline. - A string, the *file name* of a community pipeline hosted on GitHub under [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file names must match the file name and not the pipeline script (`clip_guided_stable_diffusion` instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the current `main` branch of GitHub. - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. The directory must contain a file called `pipeline.py` that defines the custom pipeline. <Tip warning={true}> ๐Ÿงช This is an experimental feature and may change in the future. </Tip> For more information on how to load and create custom pipelines, take a look at [How to contribute a community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline). force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a ๐Ÿค— Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. use_onnx (`bool`, *optional*, defaults to `False`): If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending with `.onnx` and `.pb`. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom pipelines and components defined on the Hub in their own files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. Returns: `os.PathLike`: A path to the downloaded pipeline. <Tip> To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `huggingface-cli login`. 
</Tip> """ cache_dir = kwargs.pop("cache_dir", None) resume_download = kwargs.pop("resume_download", False) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) from_flax = kwargs.pop("from_flax", False) custom_pipeline = kwargs.pop("custom_pipeline", None) custom_revision = kwargs.pop("custom_revision", None) variant = kwargs.pop("variant", None) use_safetensors = kwargs.pop("use_safetensors", None) use_onnx = kwargs.pop("use_onnx", None) load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) trust_remote_code = kwargs.pop("trust_remote_code", False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True allow_patterns = None ignore_patterns = None model_info_call_error: Optional[Exception] = None if not local_files_only: try: info = model_info(pretrained_model_name, token=token, revision=revision) except (HTTPError, OfflineModeIsEnabled, requests.ConnectionError) as e: logger.warning(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.") local_files_only = True model_info_call_error = e # save error to reraise it if model is not cached locally if not local_files_only: config_file = hf_hub_download( pretrained_model_name, cls.config_name, cache_dir=cache_dir, revision=revision, proxies=proxies, force_download=force_download, resume_download=resume_download, token=token, ) config_dict = cls._dict_from_json_file(config_file) ignore_filenames = config_dict.pop("_ignore_files", []) # retrieve all folder_names that contain relevant files folder_names = [k for k, v in config_dict.items() if isinstance(v, list) and k != "_class_name"] filenames = {sibling.rfilename for sibling in info.siblings} model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant) diffusers_module = importlib.import_module(__name__.split(".")[0]) pipelines = getattr(diffusers_module, "pipelines") # optionally create a custom component <> custom file mapping custom_components = {} for component in folder_names: module_candidate = config_dict[component][0] if module_candidate is None or not isinstance(module_candidate, str): continue # We compute candidate file path on the Hub. Do not use `os.path.join`. candidate_file = f"{component}/{module_candidate}.py" if candidate_file in filenames: custom_components[component] = module_candidate elif module_candidate not in LOADABLE_CLASSES and not hasattr(pipelines, module_candidate): raise ValueError( f"{candidate_file} as defined in `model_index.json` does not exist in {pretrained_model_name} and is not a module in 'diffusers/pipelines'." ) if len(variant_filenames) == 0 and variant is not None: deprecation_message = ( f"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available." f"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`" "if such variant modeling files are not available. Doing so will lead to an error in v0.24.0 as defaulting to non-variant" "modeling files is deprecated." 
) deprecate("no variant default", "0.24.0", deprecation_message, standard_warn=False) # remove ignored filenames model_filenames = set(model_filenames) - set(ignore_filenames) variant_filenames = set(variant_filenames) - set(ignore_filenames) # if the whole pipeline is cached we don't have to ping the Hub if revision in DEPRECATED_REVISION_ARGS and version.parse( version.parse(__version__).base_version ) >= version.parse("0.22.0"): warn_deprecated_model_variant(pretrained_model_name, token, variant, revision, model_filenames) model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names} custom_class_name = None if custom_pipeline is None and isinstance(config_dict["_class_name"], (list, tuple)): custom_pipeline = config_dict["_class_name"][0] custom_class_name = config_dict["_class_name"][1] # all filenames compatible with variant will be added allow_patterns = list(model_filenames) # allow all patterns from non-model folders # this enables downloading schedulers, tokenizers, ... allow_patterns += [f"{k}/*" for k in folder_names if k not in model_folder_names] # add custom component files allow_patterns += [f"{k}/{f}.py" for k, f in custom_components.items()] # add custom pipeline file allow_patterns += [f"{custom_pipeline}.py"] if f"{custom_pipeline}.py" in filenames else [] # also allow downloading config.json files with the model allow_patterns += [os.path.join(k, "config.json") for k in model_folder_names] allow_patterns += [ SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name, CUSTOM_PIPELINE_FILE_NAME, ] load_pipe_from_hub = custom_pipeline is not None and f"{custom_pipeline}.py" in filenames load_components_from_hub = len(custom_components) > 0 if load_pipe_from_hub and not trust_remote_code: raise ValueError( f"The repository for {pretrained_model_name} contains custom code in {custom_pipeline}.py which must be executed to correctly " f"load the model. You can inspect the repository content at https://hf.co/{pretrained_model_name}/blob/main/{custom_pipeline}.py.\n" f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." ) if load_components_from_hub and not trust_remote_code: raise ValueError( f"The repository for {pretrained_model_name} contains custom code in {'.py, '.join([os.path.join(k, v) for k,v in custom_components.items()])} which must be executed to correctly " f"load the model. You can inspect the repository content at {', '.join([f'https://hf.co/{pretrained_model_name}/{k}/{v}.py' for k,v in custom_components.items()])}.\n" f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." 
) # retrieve passed components that should not be downloaded pipeline_class = _get_pipeline_class( cls, config_dict, load_connected_pipeline=load_connected_pipeline, custom_pipeline=custom_pipeline, repo_id=pretrained_model_name if load_pipe_from_hub else None, hub_revision=revision, class_name=custom_class_name, cache_dir=cache_dir, revision=custom_revision, ) expected_components, _ = cls._get_signature_keys(pipeline_class) passed_components = [k for k in expected_components if k in kwargs] if ( use_safetensors and not allow_pickle and not is_safetensors_compatible( model_filenames, variant=variant, passed_components=passed_components ) ): raise EnvironmentError( f"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})" ) if from_flax: ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"] elif use_safetensors and is_safetensors_compatible( model_filenames, variant=variant, passed_components=passed_components ): ignore_patterns = ["*.bin", "*.msgpack"] use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx if not use_onnx: ignore_patterns += ["*.onnx", "*.pb"] safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")} safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")} if ( len(safetensors_variant_filenames) > 0 and safetensors_model_filenames != safetensors_variant_filenames ): logger.warning( f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." ) else: ignore_patterns = ["*.safetensors", "*.msgpack"] use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx if not use_onnx: ignore_patterns += ["*.onnx", "*.pb"] bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")} bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")} if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames: logger.warning( f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." 
) # Don't download any objects that are passed allow_patterns = [ p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components) ] if pipeline_class._load_connected_pipes: allow_patterns.append("README.md") # Don't download index files of forbidden patterns either ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns] re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns] re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns] expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)] expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)] snapshot_folder = Path(config_file).parent pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) if pipeline_is_cached and not force_download: # if the pipeline is cached, we can directly return it # else call snapshot_download return snapshot_folder user_agent = {"pipeline_class": cls.__name__} if custom_pipeline is not None and not custom_pipeline.endswith(".py"): user_agent["custom_pipeline"] = custom_pipeline # download all allow_patterns - ignore_patterns try: cached_folder = snapshot_download( pretrained_model_name, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, user_agent=user_agent, ) # retrieve pipeline class from local file cls_name = cls.load_config(os.path.join(cached_folder, "model_index.json")).get("_class_name", None) cls_name = cls_name[4:] if isinstance(cls_name, str) and cls_name.startswith("Flax") else cls_name diffusers_module = importlib.import_module(__name__.split(".")[0]) pipeline_class = getattr(diffusers_module, cls_name, None) if isinstance(cls_name, str) else None if pipeline_class is not None and pipeline_class._load_connected_pipes: modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], []) for connected_pipe_repo_id in connected_pipes: download_kwargs = { "cache_dir": cache_dir, "resume_download": resume_download, "force_download": force_download, "proxies": proxies, "local_files_only": local_files_only, "token": token, "variant": variant, "use_safetensors": use_safetensors, } DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs) return cached_folder except FileNotFoundError: # Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache. # This can happen in two cases: # 1. If the user passed `local_files_only=True` => we raise the error directly # 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error if model_info_call_error is None: # 1. user passed `local_files_only=True` raise else: # 2. we forced `local_files_only=True` when `model_info` failed raise EnvironmentError( f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred" " while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace" " above." 
) from model_info_call_error @classmethod def _get_signature_keys(cls, obj): parameters = inspect.signature(obj.__init__).parameters required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) expected_modules = set(required_parameters.keys()) - {"self"} optional_names = list(optional_parameters) for name in optional_names: if name in cls._optional_components: expected_modules.add(name) optional_parameters.remove(name) return expected_modules, optional_parameters @property def components(self) -> Dict[str, Any]: r""" The `self.components` property can be useful to run different pipelines with the same weights and configurations without reallocating additional memory. Returns (`dict`): A dictionary containing all the modules needed to initialize the pipeline. Examples: ```py >>> from diffusers import ( ... StableDiffusionPipeline, ... StableDiffusionImg2ImgPipeline, ... StableDiffusionInpaintPipeline, ... ) >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components) >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components) ``` """ expected_modules, optional_parameters = self._get_signature_keys(self) components = { k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters } if set(components.keys()) != expected_modules: raise ValueError( f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" f" {expected_modules} to be defined, but {components.keys()} are defined." ) return components @staticmethod def numpy_to_pil(images): """ Convert a NumPy image or a batch of images to a PIL image. """ return numpy_to_pil(images) def progress_bar(self, iterable=None, total=None): if not hasattr(self, "_progress_bar_config"): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError( f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." ) if iterable is not None: return tqdm(iterable, **self._progress_bar_config) elif total is not None: return tqdm(total=total, **self._progress_bar_config) else: raise ValueError("Either `total` or `iterable` has to be defined.") def set_progress_bar_config(self, **kwargs): self._progress_bar_config = kwargs def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): r""" Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed up during training is not guaranteed. <Tip warning={true}> โš ๏ธ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes precedent. </Tip> Parameters: attention_op (`Callable`, *optional*): Override the default `None` operator for use as `op` argument to the [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) function of xFormers. 
Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp >>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) >>> # Workaround for not accepting attention shape using VAE for Flash Attention >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None) ``` """ self.set_use_memory_efficient_attention_xformers(True, attention_op) def disable_xformers_memory_efficient_attention(self): r""" Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). """ self.set_use_memory_efficient_attention_xformers(False) def set_use_memory_efficient_attention_xformers( self, valid: bool, attention_op: Optional[Callable] = None ) -> None: # Recursively walk through all the children. # Any children which exposes the set_use_memory_efficient_attention_xformers method # gets the message def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, "set_use_memory_efficient_attention_xformers"): module.set_use_memory_efficient_attention_xformers(valid, attention_op) for child in module.children(): fn_recursive_set_mem_eff(child) module_names, _ = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] for module in modules: fn_recursive_set_mem_eff(module) def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. For more than one attention head, the computation is performed sequentially over each head. This is useful to save some memory in exchange for a small speed decrease. <Tip warning={true}> โš ๏ธ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. These attention computations are already very memory efficient so you won't need to enable this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs! </Tip> Args: slice_size (`str` or `int`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. Examples: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline >>> pipe = StableDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", ... torch_dtype=torch.float16, ... use_safetensors=True, ... ) >>> prompt = "a photo of an astronaut riding a horse on mars" >>> pipe.enable_attention_slicing() >>> image = pipe(prompt).images[0] ``` """ self.set_attention_slice(slice_size) def disable_attention_slicing(self): r""" Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is computed in one step. 
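        Example:

        A minimal sketch, assuming a pipeline on which slicing was previously enabled (the checkpoint id is illustrative):

        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPipeline

        >>> pipe = StableDiffusionPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ... ).to("cuda")
        >>> pipe.enable_attention_slicing()
        >>> pipe.disable_attention_slicing()  # attention is now computed in a single step again
        ```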
""" # set slice_size = `None` to disable `attention slicing` self.enable_attention_slicing(None) def set_attention_slice(self, slice_size: Optional[int]): module_names, _ = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, "set_attention_slice")] for module in modules: module.set_attention_slice(slice_size) class StableDiffusionMixin: r""" Helper for DiffusionPipeline with vae and unet.(mainly for LDM such as stable diffusion) """ def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stages where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ if not hasattr(self, "unet"): raise ValueError("The pipeline must have `unet` for using FreeU.") self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) def disable_freeu(self): """Disables the FreeU mechanism if enabled.""" self.unet.disable_freeu() def fuse_qkv_projections(self, unet: bool = True, vae: bool = True): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is ๐Ÿงช experimental. </Tip> Args: unet (`bool`, defaults to `True`): To apply fusion on the UNet. vae (`bool`, defaults to `True`): To apply fusion on the VAE. 
""" self.fusing_unet = False self.fusing_vae = False if unet: self.fusing_unet = True self.unet.fuse_qkv_projections() self.unet.set_attn_processor(FusedAttnProcessor2_0()) if vae: if not isinstance(self.vae, AutoencoderKL): raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.") self.fusing_vae = True self.vae.fuse_qkv_projections() self.vae.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True): """Disable QKV projection fusion if enabled. <Tip warning={true}> This API is ๐Ÿงช experimental. </Tip> Args: unet (`bool`, defaults to `True`): To apply fusion on the UNet. vae (`bool`, defaults to `True`): To apply fusion on the VAE. """ if unet: if not self.fusing_unet: logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.") else: self.unet.unfuse_qkv_projections() self.fusing_unet = False if vae: if not self.fusing_vae: logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.") else: self.vae.unfuse_qkv_projections() self.fusing_vae = False
diffusers/src/diffusers/pipelines/pipeline_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/pipeline_utils.py", "repo_id": "diffusers", "token_count": 37978 }
124
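A hedged usage sketch of the `DiffusionPipeline.download` and `from_pretrained` classmethods defined in the file above; the repository id, variant, and dtype are illustrative assumptions, not requirements of the API:

```py
import torch

from diffusers import DiffusionPipeline

# Resolve allow_patterns / ignore_patterns and return the cached snapshot folder.
cached_folder = DiffusionPipeline.download(
    "runwayml/stable-diffusion-v1-5",
    variant="fp16",        # variant-compatible filenames are kept in allow_patterns
    use_safetensors=True,  # "*.bin" files are added to ignore_patterns
)

# Loading from the local snapshot avoids any further Hub calls.
pipe = DiffusionPipeline.from_pretrained(cached_folder, torch_dtype=torch.float16, variant="fp16")
pipe.enable_model_cpu_offload()
```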
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["StableDiffusionPipelineOutput"]} if is_transformers_available() and is_flax_available(): _import_structure["pipeline_output"].extend(["FlaxStableDiffusionPipelineOutput"]) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["clip_image_project_model"] = ["CLIPImageProjection"] _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] _import_structure["pipeline_stable_diffusion"] = ["StableDiffusionPipeline"] _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] _import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"] _import_structure["pipeline_stable_diffusion_gligen_text_image"] = ["StableDiffusionGLIGENTextImagePipeline"] _import_structure["pipeline_stable_diffusion_img2img"] = ["StableDiffusionImg2ImgPipeline"] _import_structure["pipeline_stable_diffusion_inpaint"] = ["StableDiffusionInpaintPipeline"] _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] _import_structure["pipeline_stable_diffusion_instruct_pix2pix"] = ["StableDiffusionInstructPix2PixPipeline"] _import_structure["pipeline_stable_diffusion_latent_upscale"] = ["StableDiffusionLatentUpscalePipeline"] _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] _import_structure["pipeline_stable_diffusion_upscale"] = ["StableDiffusionUpscalePipeline"] _import_structure["pipeline_stable_unclip"] = ["StableUnCLIPPipeline"] _import_structure["pipeline_stable_unclip_img2img"] = ["StableUnCLIPImg2ImgPipeline"] _import_structure["safety_checker"] = ["StableDiffusionSafetyChecker"] _import_structure["stable_unclip_image_normalizer"] = ["StableUnCLIPImageNormalizer"] try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionImageVariationPipeline, ) _dummy_objects.update({"StableDiffusionImageVariationPipeline": StableDiffusionImageVariationPipeline}) else: _import_structure["pipeline_stable_diffusion_image_variation"] = ["StableDiffusionImageVariationPipeline"] try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepth2ImgPipeline, ) _dummy_objects.update( { "StableDiffusionDepth2ImgPipeline": StableDiffusionDepth2ImgPipeline, } ) else: _import_structure["pipeline_stable_diffusion_depth2img"] = ["StableDiffusionDepth2ImgPipeline"] try: if not (is_transformers_available() 
and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_onnx_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) else: _import_structure["pipeline_onnx_stable_diffusion"] = [ "OnnxStableDiffusionPipeline", "StableDiffusionOnnxPipeline", ] _import_structure["pipeline_onnx_stable_diffusion_img2img"] = ["OnnxStableDiffusionImg2ImgPipeline"] _import_structure["pipeline_onnx_stable_diffusion_inpaint"] = ["OnnxStableDiffusionInpaintPipeline"] _import_structure["pipeline_onnx_stable_diffusion_inpaint_legacy"] = ["OnnxStableDiffusionInpaintPipelineLegacy"] _import_structure["pipeline_onnx_stable_diffusion_upscale"] = ["OnnxStableDiffusionUpscalePipeline"] if is_transformers_available() and is_flax_available(): from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState}) _import_structure["pipeline_flax_stable_diffusion"] = ["FlaxStableDiffusionPipeline"] _import_structure["pipeline_flax_stable_diffusion_img2img"] = ["FlaxStableDiffusionImg2ImgPipeline"] _import_structure["pipeline_flax_stable_diffusion_inpaint"] = ["FlaxStableDiffusionInpaintPipeline"] _import_structure["safety_checker_flax"] = ["FlaxStableDiffusionSafetyChecker"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .clip_image_project_model import CLIPImageProjection from .pipeline_stable_diffusion import ( StableDiffusionPipeline, StableDiffusionPipelineOutput, StableDiffusionSafetyChecker, ) from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_instruct_pix2pix import ( StableDiffusionInstructPix2PixPipeline, ) from .pipeline_stable_diffusion_latent_upscale import ( StableDiffusionLatentUpscalePipeline, ) from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionImageVariationPipeline, ) else: from .pipeline_stable_diffusion_image_variation import ( StableDiffusionImageVariationPipeline, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline else: from .pipeline_stable_diffusion_depth2img import ( StableDiffusionDepth2ImgPipeline, ) try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * else: from .pipeline_onnx_stable_diffusion import ( OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline, ) from 
.pipeline_onnx_stable_diffusion_img2img import ( OnnxStableDiffusionImg2ImgPipeline, ) from .pipeline_onnx_stable_diffusion_inpaint import ( OnnxStableDiffusionInpaintPipeline, ) from .pipeline_onnx_stable_diffusion_upscale import ( OnnxStableDiffusionUpscalePipeline, ) try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_objects import * else: from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_img2img import ( FlaxStableDiffusionImg2ImgPipeline, ) from .pipeline_flax_stable_diffusion_inpaint import ( FlaxStableDiffusionInpaintPipeline, ) from .pipeline_output import FlaxStableDiffusionPipelineOutput from .safety_checker_flax import FlaxStableDiffusionSafetyChecker else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for name, value in _additional_imports.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/stable_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 3769 }
125
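A short sketch of what the lazy import structure above means in practice; the checkpoint id is an assumption used only for illustration:

```py
import torch

# `_LazyModule` registers the names listed in `_import_structure`, so this import is cheap:
# the heavy pipeline module is only loaded on first attribute access (or eagerly when
# `DIFFUSERS_SLOW_IMPORT` is set, matching the `TYPE_CHECKING` branch above).
from diffusers.pipelines.stable_diffusion import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
```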
# Copyright 2024 The InstructPix2Pix Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess def preprocess(image): deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): w, h = image[0].size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class StableDiffusionInstructPix2PixPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin ): r""" Pipeline for pixel-level image editing by following text instructions (based on Stable Diffusion). This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "image_latents"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection] = None, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, num_inference_steps: int = 100, guidance_scale: float = 7.5, image_guidance_scale: float = 1.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept image latents as `image`, but if passing latents directly it is not encoded again. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. image_guidance_scale (`float`, *optional*, defaults to 1.5): Push the generated image towards the initial `image`. Image guidance scale is enabled by setting `image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a value of at least `1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: ```py >>> import PIL >>> import requests >>> import torch >>> from io import BytesIO >>> from diffusers import StableDiffusionInstructPix2PixPipeline >>> def download_image(url): ... response = requests.get(url) ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") >>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" >>> image = download_image(img_url).resize((512, 512)) >>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( ... "timbrooks/instruct-pix2pix", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "make the mountains snowy" >>> image = pipe(prompt=prompt, image=image).images[0] ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. 
Check inputs self.check_inputs( prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._image_guidance_scale = image_guidance_scale device = self._execution_device if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if self.do_classifier_free_guidance: image_embeds = torch.cat([image_embeds, negative_image_embeds, negative_image_embeds]) if image is None: raise ValueError("`image` input cannot be undefined.") # 1. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 2. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 3. Preprocess image image = self.image_processor.preprocess(image) # 4. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare Image latents image_latents = self.prepare_image_latents( image, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, self.do_classifier_free_guidance, ) height, width = image_latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor # 6. Prepare latent variables num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 7. Check that shapes of latents and image match the UNet channels num_channels_image = image_latents.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_image`: {num_channels_image} " f" = {num_channels_latents+num_channels_image}. Please verify the config of" " `pipeline.unet` or your `image` input." ) # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None # 9. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Expand the latents if we are doing classifier free guidance. # The latents are expanded 3 times because for pix2pix the guidance\ # is applied for both the text and the input image. 
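                # A hedged restatement of the guidance combination applied a few lines below:
                # with eps_t = noise_pred_text, eps_i = noise_pred_image, eps_u = noise_pred_uncond,
                #     eps = eps_u + guidance_scale * (eps_t - eps_i) + image_guidance_scale * (eps_i - eps_u)
                # so `guidance_scale` pulls the sample towards the text instruction and
                # `image_guidance_scale` towards the input image; setting both to 1 recovers eps_t.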
latent_model_input = torch.cat([latents] * 3) if self.do_classifier_free_guidance else latents # concat latents, image_latents in the channel dimension scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) # predict the noise residual noise_pred = self.unet( scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) noise_pred = ( noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_image) + self.image_guidance_scale * (noise_pred_image - noise_pred_uncond) ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) image_latents = callback_outputs.pop("image_latents", image_latents) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_ prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype else: prompt_embeds_dtype = self.unet.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes # pix2pix has two negative embeddings, and unlike in other pipelines latents are ordered [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds] prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs( self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_latents( self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: image_latents = image else: image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax") if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand image_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) return image_latents @property def guidance_scale(self): return self._guidance_scale @property def image_guidance_scale(self): return self._image_guidance_scale @property def num_timesteps(self): return self._num_timesteps # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self.guidance_scale > 1.0 and self.image_guidance_scale >= 1.0
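For reference, here is a minimal sketch of how the two guidance scales and the `callback_on_step_end` hook documented above are typically wired together. It assumes the publicly available `timbrooks/instruct-pix2pix` checkpoint and a CUDA device, and reuses the image URL from the docstring example; treat it as an illustration of the call signature rather than a canonical recipe.

```py
import torch

from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

# Load the pipeline (assumes the timbrooks/instruct-pix2pix checkpoint and a CUDA GPU).
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
).resize((512, 512))


# Optional per-step hook: it receives the pipeline, the step index, the timestep and the tensors
# listed in `callback_on_step_end_tensor_inputs`, and must return a dict of (possibly modified)
# tensors, e.g. "latents".
def on_step_end(pipeline, step, timestep, callback_kwargs):
    return callback_kwargs  # no-op; callback_kwargs["latents"] could be inspected or edited here


# `guidance_scale` > 1 and `image_guidance_scale` >= 1 enable classifier-free guidance
# (see the `do_classifier_free_guidance` property above).
edited = pipe(
    prompt="make the mountains snowy",
    image=init_image,
    num_inference_steps=20,
    guidance_scale=7.5,
    image_guidance_scale=1.5,
    callback_on_step_end=on_step_end,
).images[0]
edited.save("snowy_mountain.png")
```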
# Source file: diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect from typing import Callable, List, Optional, Union import torch from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras from ...image_processor import VaeImageProcessor from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import LMSDiscreteScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model self.alphas_cumprod = alphas_cumprod def apply_model(self, *args, **kwargs): if len(args) == 3: encoder_hidden_states = args[-1] args = args[:2] if kwargs.get("cond", None) is not None: encoder_hidden_states = kwargs.pop("cond") return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample class StableDiffusionKDiffusionPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin ): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights <Tip warning={true}> This is an experimental pipeline and is likely to change in the future. </Tip> Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker: bool = True, ): super().__init__() logger.info( f"{self.__class__} is an experimntal pipeline and is likely to change in the future. We recommend to use" " this pipeline for fast experimentation / iteration if needed, but advice to rely on existing pipelines" " as defined in https://huggingface.co/docs/diffusers/api/schedulers#implemented-schedulers for" " production settings." ) # get correct sigmas from LMS scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.register_to_config(requires_safety_checker=requires_safety_checker) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) model = ModelWrapper(unet, scheduler.alphas_cumprod) if scheduler.config.prediction_type == "v_prediction": self.k_diffusion_model = CompVisVDenoiser(model) else: self.k_diffusion_model = CompVisDenoiser(model) def set_scheduler(self, scheduler_type: str): library = importlib.import_module("k_diffusion") sampling = getattr(library, "sampling") try: self.sampler = getattr(sampling, scheduler_type) except Exception: valid_samplers = [] for s in dir(sampling): if "sample_" in s: valid_samplers.append(s) raise ValueError(f"Invalid scheduler type {scheduler_type}. Please choose one of {valid_samplers}.") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, use_karras_sigmas: Optional[bool] = False, noise_sampler_seed: Optional[int] = None, clip_skip: int = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Use karras sigmas. For example, specifying `sample_dpmpp_2m` to `set_scheduler` will be equivalent to `DPM++2M` in stable-diffusion-webui. On top of that, setting this option to True will make it `DPM++2M Karras`. noise_sampler_seed (`int`, *optional*, defaults to `None`): The random seed to use for the noise sampler. If `None`, a random seed will be generated. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = True if guidance_scale <= 1.0: raise ValueError("has to use guidance_scale") # 3. 
Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) # 5. Prepare sigmas if use_karras_sigmas: sigma_min: float = self.k_diffusion_model.sigmas[0].item() sigma_max: float = self.k_diffusion_model.sigmas[-1].item() sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) sigmas = sigmas.to(device) else: sigmas = self.scheduler.sigmas sigmas = sigmas.to(prompt_embeds.dtype) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) latents = latents * sigmas[0] self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) # 7. Define model function def model_fn(x, t): latent_model_input = torch.cat([x] * 2) t = torch.cat([t] * 2) noise_pred = self.k_diffusion_model(latent_model_input, t, cond=prompt_embeds) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred # 8. Run k-diffusion solver sampler_kwargs = {} if "noise_sampler" in inspect.signature(self.sampler).parameters: min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) sampler_kwargs["noise_sampler"] = noise_sampler if "generator" in inspect.signature(self.sampler).parameters: sampler_kwargs["generator"] = generator latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
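As a rough usage sketch for the experimental k-diffusion wrapper above: the snippet below assumes the `k-diffusion` package is installed, that `StableDiffusionKDiffusionPipeline` is importable from the top-level `diffusers` namespace, and that a standard Stable Diffusion checkpoint such as `runwayml/stable-diffusion-v1-5` (the one referenced in the safety-checker docstring) is used. The sampler name `sample_dpmpp_2m` and the `use_karras_sigmas` flag come straight from the docstrings above.

```py
import torch

from diffusers import StableDiffusionKDiffusionPipeline

# Requires the `k-diffusion` package; the checkpoint name is illustrative.
pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Pick any sampler exposed by k_diffusion.sampling (their names start with "sample_").
pipe.set_scheduler("sample_dpmpp_2m")

# With use_karras_sigmas=True this roughly corresponds to "DPM++ 2M Karras" in other UIs.
image = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    num_inference_steps=25,
    guidance_scale=7.5,
    use_karras_sigmas=True,
).images[0]
image.save("astronaut.png")
```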
# Source file: diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import ( AttnProcessor2_0, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from .watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionXLImg2ImgPipeline >>> from diffusers.utils import load_image >>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" >>> init_image = load_image(url).convert("RGB") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt, image=init_image).images[0] ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class StableDiffusionXLImg2ImgPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "add_neg_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, requires_aesthetics_score: bool = False, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL 
always indexes from the penultimate layer. prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, 
num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, strength, num_inference_steps, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if num_inference_steps is None: raise ValueError("`num_inference_steps` cannot be None.") elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError( f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" f" {type(num_inference_steps)}." ) if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." ) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd devirative) which leads to incorrect results. 
By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack( [single_negative_image_embeds] * num_images_per_prompt, dim=0 ) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: repeat_dims = [1] image_embeds = [] for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) single_negative_image_embeds = single_negative_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) ) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) else: single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) image_embeds.append(single_image_embeds) return image_embeds def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None, ): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list( negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) ) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if ( expected_add_embed_dim > passed_add_embed_dim and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." ) elif ( expected_add_embed_dim < passed_add_embed_dim and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." 
) elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return add_time_ids, add_neg_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.FloatTensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, strength: float = 0.3, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, aesthetic_score: float = 6.0, negative_aesthetic_score: float = 2.5, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): The image(s) to modify with the pipeline. strength (`float`, *optional*, defaults to 0.3): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of `denoising_start` being declared as an integer, the value of `strength` will be ignored. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. 
prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. aesthetic_score (`float`, *optional*, defaults to 6.0): Used to simulate an aesthetic score of the generated image by influencing the positive text condition. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_aesthetic_score (`float`, *optional*, defaults to 2.5): Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to simulate an aesthetic score of the generated image by influencing the negative text condition. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. 
""" callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, strength, num_inference_steps, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # 4. Preprocess image image = self.image_processor.preprocess(image) # 5. Prepare timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) add_noise = True if self.denoising_start is None else False # 6. Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, add_noise, ) # 7. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 8. 
Prepare added time ids & embeddings if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 9. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 9.1 Apply denoising_end if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." 
) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 9.2 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
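# Usage sketch: a minimal example of driving the SDXL img2img pipeline defined
# above. The checkpoint name, the CUDA device, and the local file names are
# assumptions for illustration only; swap them for whatever fits your setup.
if __name__ == "__main__":
    import torch

    from diffusers import StableDiffusionXLImg2ImgPipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
    ).to("cuda")

    # `strength` controls how much noise is added to the input image: small
    # values stay close to it, while 1.0 effectively ignores it.
    init_image = load_image("input.png").convert("RGB")
    image = pipe(
        prompt="a photo of an astronaut riding a horse on mars",
        image=init_image,
        strength=0.3,
        num_inference_steps=50,
    ).images[0]
    image.save("output.png")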
# Source file: diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import List, Optional, Tuple, Union import torch from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel from ...schedulers import UnCLIPScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name class UnCLIPPipeline(DiffusionPipeline): """ Pipeline for text-to-image generation using unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: text_encoder ([`~transformers.CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution UNet. Used in all but the last step of the super resolution diffusion process. super_res_last ([`UNet2DModel`]): Super resolution UNet. Used in the last step of the super resolution diffusion process. prior_scheduler ([`UnCLIPScheduler`]): Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]). decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
""" _exclude_from_cpu_offload = ["prior"] prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler model_cpu_offload_seq = "text_encoder->text_proj->decoder->super_res_first->super_res_last" def __init__( self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__() self.register_modules( prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_enc_hid_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = 
self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_enc_hid_states, text_mask @torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, prior_num_inference_steps: int = 25, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prior_latents: Optional[torch.FloatTensor] = None, decoder_latents: Optional[torch.FloatTensor] = None, super_res_latents: Optional[torch.FloatTensor] = None, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output` and `text_attention_mask` is passed. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the prior. More denoising steps usually lead to a higher quality image at the expense of slower inference. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7): The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*): Pre-generated noisy latents to be used as inputs for the prior. decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*): Pre-generated noisy latents to be used as inputs for the decoder. 
super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*): Pre-generated noisy latents to be used as inputs for the decoder. prior_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. decoder_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. text_model_output (`CLIPTextModelOutput`, *optional*): Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text outputs can be passed for tasks like text embedding interpolations. Make sure to also pass `text_attention_mask` in this case. `prompt` can the be left `None`. text_attention_mask (`torch.Tensor`, *optional*): Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention masks are necessary when passing `text_model_output`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if prompt is not None: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") else: batch_size = text_model_output[0].shape[0] device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 prompt_embeds, text_enc_hid_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_enc_hid_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = 
self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents # done prior # decoder text_enc_hid_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_enc_hid_states, do_classifier_free_guidance=do_classifier_free_guidance, ) if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_enc_hid_states.dtype, device, generator, decoder_latents, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_enc_hid_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, super_res_latents, self.super_res_scheduler, ) if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for 
i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample image = super_res_latents # done super res self.maybe_free_model_hooks() # post processing image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py", "repo_id": "diffusers", "token_count": 9940 }
129
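The `__call__` signature of the unCLIP pipeline above maps onto a straightforward text-to-image call. A minimal usage sketch, assuming the publicly available `kakaobrain/karlo-v1-alpha` checkpoint and a CUDA device (adjust both to your environment):

```python
import torch
from diffusers import UnCLIPPipeline

# Load the full unCLIP stack: prior, decoder and the two super-resolution UNets.
pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(
    prompt="a photo of an astronaut riding a horse",
    num_images_per_prompt=1,
    prior_num_inference_steps=25,      # denoising steps for the prior
    decoder_num_inference_steps=25,    # denoising steps for the decoder
    super_res_num_inference_steps=7,   # denoising steps for super resolution
    prior_guidance_scale=4.0,
    decoder_guidance_scale=8.0,
    generator=generator,
).images[0]

image.save("unclip_astronaut.png")
```

The three `*_num_inference_steps` arguments control the prior, decoder, and super-resolution stages independently, mirroring the three denoising loops in the implementation above.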
# Schedulers

For more information on the schedulers, please refer to the [docs](https://huggingface.co/docs/diffusers/api/schedulers/overview).
diffusers/src/diffusers/schedulers/README.md/0
{ "file_path": "diffusers/src/diffusers/schedulers/README.md", "repo_id": "diffusers", "token_count": 46 }
130
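To make the interchangeable-scheduler design concrete: a scheduler can be rebuilt from another scheduler's config, so swapping samplers on an existing pipeline is a one-liner. A sketch, assuming the `runwayml/stable-diffusion-v1-5` checkpoint used elsewhere in this repository's tests:

```python
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Rebuild a different scheduler from the existing scheduler's config so the
# beta schedule, prediction type and timestep settings stay consistent.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

image = pipe("a watercolor painting of a lighthouse", num_inference_steps=25).images[0]
```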
# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class LCMSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor denoised: Optional[torch.FloatTensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor: """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.FloatTensor`): the betas that the scheduler is being initialized with. Returns: `torch.FloatTensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class LCMScheduler(SchedulerMixin, ConfigMixin): """ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with non-Markovian guidance. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. original_inference_steps (`int`, *optional*, defaults to 50): The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule. clip_sample (`bool`, defaults to `True`): Clip the predicted sample for numerical stability. clip_sample_range (`float`, defaults to 1.0): The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. 
set_alpha_to_one (`bool`, defaults to `True`): Each diffusion step uses the alphas product value at that step and at the previous one. For the final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, otherwise it uses the alpha value at step 0. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True`. timestep_spacing (`str`, defaults to `"leading"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. timestep_scaling (`float`, defaults to 10.0): The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation error at the default of `10.0` is already pretty small). rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). """ order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "scaled_linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, original_inference_steps: int = 50, clip_sample: bool = False, clip_sample_range: float = 1.0, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, timestep_spacing: str = "leading", timestep_scaling: float = 10.0, rescale_betas_zero_snr: bool = False, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0 # `set_alpha_to_one` decides whether we set this parameter simply to one or # whether we use the final alpha of the "non-previous" one. self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) self.custom_timesteps = False self._step_index = None self._begin_index = None # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) pos = 1 if len(indices) > 1 else 0 return indices[pos].item() # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index @property def step_index(self): return self._step_index @property def begin_index(self): """ The index for the first timestep. It should be set from pipeline with `set_begin_index` method. """ return self._begin_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. 
""" return sample # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def set_timesteps( self, num_inference_steps: Optional[int] = None, device: Union[str, torch.device] = None, original_inference_steps: Optional[int] = None, timesteps: Optional[List[int]] = None, strength: int = 1.0, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`, *optional*): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. original_inference_steps (`int`, *optional*): The original number of inference steps, which will be used to generate a linearly-spaced timestep schedule (which is different from the standard `diffusers` implementation). We will then take `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of equal spacing between timesteps on the training/distillation timestep schedule is used. If `timesteps` is passed, `num_inference_steps` must be `None`. """ # 0. Check inputs if num_inference_steps is None and timesteps is None: raise ValueError("Must pass exactly one of `num_inference_steps` or `custom_timesteps`.") if num_inference_steps is not None and timesteps is not None: raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") # 1. Calculate the LCM original training/distillation timestep schedule. 
original_steps = ( original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps ) if original_steps > self.config.num_train_timesteps: raise ValueError( f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) # LCM Timesteps Setting # The skipping step parameter k from the paper. k = self.config.num_train_timesteps // original_steps # LCM Training/Distillation Steps Schedule # Currently, only a linearly-spaced schedule is supported (same as in the LCM distillation scripts). lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 # 2. Calculate the LCM inference timestep schedule. if timesteps is not None: # 2.1 Handle custom timestep schedules. train_timesteps = set(lcm_origin_timesteps) non_train_timesteps = [] for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError("`custom_timesteps` must be in descending order.") if timesteps[i] not in train_timesteps: non_train_timesteps.append(timesteps[i]) if timesteps[0] >= self.config.num_train_timesteps: raise ValueError( f"`timesteps` must start before `self.config.train_timesteps`:" f" {self.config.num_train_timesteps}." ) # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1 if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: logger.warning( f"The first timestep on the custom timestep schedule is {timesteps[0]}, not" f" `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. You may get" f" unexpected results when using this timestep schedule." ) # Raise warning if custom timestep schedule contains timesteps not on original timestep schedule if non_train_timesteps: logger.warning( f"The custom timestep schedule contains the following timesteps which are not on the original" f" training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results" f" when using this timestep schedule." ) # Raise warning if custom timestep schedule is longer than original_steps if len(timesteps) > original_steps: logger.warning( f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" f" the length of the timestep schedule used for training: {original_steps}. You may get some" f" unexpected results when using this timestep schedule." ) timesteps = np.array(timesteps, dtype=np.int64) self.num_inference_steps = len(timesteps) self.custom_timesteps = True # Apply strength (e.g. for img2img pipelines) (see StableDiffusionImg2ImgPipeline.get_timesteps) init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) t_start = max(self.num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.order :] # TODO: also reset self.num_inference_steps? else: # 2.2 Create the "standard" LCM inference timestep schedule. if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." 
) skipping_step = len(lcm_origin_timesteps) // num_inference_steps if skipping_step < 1: raise ValueError( f"The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}." ) self.num_inference_steps = num_inference_steps if num_inference_steps > original_steps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:" f" {original_steps} because the final timestep schedule will be a subset of the" f" `original_inference_steps`-sized initial timestep schedule." ) # LCM Inference Steps Schedule lcm_origin_timesteps = lcm_origin_timesteps[::-1].copy() # Select (approximately) evenly spaced indices from lcm_origin_timesteps. inference_indices = np.linspace(0, len(lcm_origin_timesteps), num=num_inference_steps, endpoint=False) inference_indices = np.floor(inference_indices).astype(np.int64) timesteps = lcm_origin_timesteps[inference_indices] self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) self._step_index = None self._begin_index = None def get_scalings_for_boundary_condition_discrete(self, timestep): self.sigma_data = 0.5 # Default: 0.5 scaled_timestep = timestep * self.config.timestep_scaling c_skip = self.sigma_data**2 / (scaled_timestep**2 + self.sigma_data**2) c_out = scaled_timestep / (scaled_timestep**2 + self.sigma_data**2) ** 0.5 return c_skip, c_out def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[LCMSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) # 1. get previous step value prev_step_index = self.step_index + 1 if prev_step_index < len(self.timesteps): prev_timestep = self.timesteps[prev_step_index] else: prev_timestep = timestep # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 3. Get scalings for boundary conditions c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep) # 4. 
Compute the predicted original sample x_0 based on the model parameterization if self.config.prediction_type == "epsilon": # noise-prediction predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() elif self.config.prediction_type == "sample": # x-prediction predicted_original_sample = model_output elif self.config.prediction_type == "v_prediction": # v-prediction predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for `LCMScheduler`." ) # 5. Clip or threshold "predicted x_0" if self.config.thresholding: predicted_original_sample = self._threshold_sample(predicted_original_sample) elif self.config.clip_sample: predicted_original_sample = predicted_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 6. Denoise model output using boundary conditions denoised = c_out * predicted_original_sample + c_skip * sample # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference # Noise is not used on the final timestep of the timestep schedule. # This also means that noise is not used for one-step sampling. if self.step_index != self.num_inference_steps - 1: noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=denoised.dtype ) prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise else: prev_sample = denoised # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample, denoised) return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement # for the subsequent add_noise calls self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity def get_velocity( self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as sample self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): 
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep def previous_timestep(self, timestep): if self.custom_timesteps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: num_inference_steps = ( self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps ) prev_t = timestep - self.config.num_train_timesteps // num_inference_steps return prev_t
diffusers/src/diffusers/schedulers/scheduling_lcm.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_lcm.py", "repo_id": "diffusers", "token_count": 13433 }
131
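To illustrate the `set_timesteps`/`step` flow of `LCMScheduler` without a full pipeline, the following self-contained sketch drives the scheduler with random tensors standing in for a UNet's noise prediction (the latent shape and step count are illustrative):

```python
import torch
from diffusers import LCMScheduler

scheduler = LCMScheduler()  # defaults: 1000 train timesteps, scaled_linear betas
scheduler.set_timesteps(num_inference_steps=4)

generator = torch.Generator().manual_seed(0)
sample = torch.randn(1, 4, 64, 64)  # stands in for the initial noisy latent

for t in scheduler.timesteps:
    # In a real pipeline this would be the UNet's epsilon prediction for (sample, t).
    model_output = torch.randn_like(sample)
    out = scheduler.step(model_output, t, sample, generator=generator)
    sample = out.prev_sample   # re-noised input for the next step
    denoised = out.denoised    # consistency-model estimate of x_0 at this step
```

Note that, as in the `step` implementation above, fresh noise is injected on every step except the last one, which is why one-step sampling returns the denoised estimate directly.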
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from packaging import version from .. import __version__ from .constants import ( CONFIG_NAME, DEPRECATED_REVISION_ARGS, DIFFUSERS_DYNAMIC_MODULE_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MIN_PEFT_VERSION, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, SAFETENSORS_FILE_EXTENSION, SAFETENSORS_WEIGHTS_NAME, USE_PEFT_BACKEND, WEIGHTS_NAME, ) from .deprecation_utils import deprecate from .doc_utils import replace_example_docstring from .dynamic_modules_utils import get_class_from_dynamic_module from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video from .hub_utils import ( PushToHubMixin, _add_variant, _get_model_file, extract_commit_hash, http_user_agent, ) from .import_utils import ( BACKENDS_MAPPING, DIFFUSERS_SLOW_IMPORT, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_accelerate_available, is_accelerate_version, is_bs4_available, is_flax_available, is_ftfy_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_peft_available, is_scipy_available, is_tensorboard_available, is_torch_available, is_torch_npu_available, is_torch_version, is_torch_xla_available, is_torchsde_available, is_torchvision_available, is_transformers_available, is_transformers_version, is_unidecode_available, is_wandb_available, is_xformers_available, requires_backends, ) from .loading_utils import load_image from .logging import get_logger from .outputs import BaseOutput from .peft_utils import ( check_peft_version, delete_adapter_layers, get_adapter_name, get_peft_kwargs, recurse_remove_peft_layers, scale_lora_layers, set_adapter_layers, set_weights_and_activate_adapters, unscale_lora_layers, ) from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil from .state_dict_utils import ( convert_all_state_dict_to_peft, convert_state_dict_to_diffusers, convert_state_dict_to_kohya, convert_state_dict_to_peft, convert_unet_state_dict_to_peft, ) logger = get_logger(__name__) def check_min_version(min_version): if version.parse(__version__) < version.parse(min_version): if "dev" in min_version: error_message = ( "This example requires a source install from HuggingFace diffusers (see " "`https://huggingface.co/docs/diffusers/installation#install-from-source`)," ) else: error_message = f"This example requires a minimum version of {min_version}," error_message += f" but the version found is {__version__}.\n" raise ImportError(error_message)
diffusers/src/diffusers/utils/__init__.py/0
{ "file_path": "diffusers/src/diffusers/utils/__init__.py", "repo_id": "diffusers", "token_count": 1496 }
132
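The `check_min_version` helper defined at the end of this module is intended to be called once at the top of an example script so that an outdated install fails fast with a clear message. A hedged usage sketch (the version string is illustrative):

```python
from diffusers.utils import check_min_version

# Raises an ImportError if the installed diffusers is older than the version
# this script was written against (the version string here is illustrative).
check_min_version("0.27.0")
```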
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py", "repo_id": "diffusers", "token_count": 236 }
133
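These autogenerated dummy classes keep top-level imports working when optional backends are missing; the failure is deferred until the class is actually used. A sketch of the expected behavior in an environment without `note_seq` (the checkpoint id is hypothetical):

```python
from diffusers import SpectrogramDiffusionPipeline  # resolves to the dummy class if note_seq is missing

try:
    # Instantiation is intercepted by `requires_backends`, which raises an
    # ImportError listing the backends that need to be installed.
    pipe = SpectrogramDiffusionPipeline.from_pretrained("some/checkpoint")  # hypothetical id
except ImportError as err:
    print(err)
```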
import inspect from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax @require_flax class FlaxModelTesterMixin: def test_output(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"]) jax.lax.stop_gradient(variables) output = model.apply(variables, inputs_dict["sample"]) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"]) jax.lax.stop_gradient(variables) output = model.apply(variables, inputs_dict["sample"]) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_deprecated_kwargs(self): has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are" " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to" f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" " from `_deprecated_kwargs = [<deprecated_argument>]`" )
diffusers/tests/models/test_modeling_common_flax.py/0
{ "file_path": "diffusers/tests/models/test_modeling_common_flax.py", "repo_id": "diffusers", "token_count": 1124 }
134
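A concrete test class is expected to mix `FlaxModelTesterMixin` into a `unittest.TestCase`, set `model_class`, and implement `prepare_init_args_and_inputs_for_common`. A hypothetical sketch using `FlaxAutoencoderKL`; the config values, tensor shapes, and relative import path are illustrative assumptions rather than a copy of the real test:

```python
import unittest

import jax
import jax.numpy as jnp

from diffusers import FlaxAutoencoderKL

from .test_modeling_common_flax import FlaxModelTesterMixin


class FlaxAutoencoderKLTesterSketch(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    def prepare_init_args_and_inputs_for_common(self):
        # Small VAE config so the shared mixin tests run quickly.
        init_dict = {
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ("DownEncoderBlock2D", "DownEncoderBlock2D"),
            "up_block_types": ("UpDecoderBlock2D", "UpDecoderBlock2D"),
            "block_out_channels": (32, 64),
            "latent_channels": 4,
        }
        inputs_dict = {
            "prng_key": jax.random.PRNGKey(0),
            "sample": jnp.zeros((4, 3, 32, 32), dtype=jnp.float32),
        }
        return init_dict, inputs_dict
```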
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from diffusers import ( DDIMScheduler, DDPMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, PNDMScheduler, logging, ) from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.utils.testing_utils import CaptureLogger class SampleObject(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], ): pass class SampleObject2(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", f=[1, 3], ): pass class SampleObject3(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], f=[1, 3], ): pass class SampleObject4(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 5], f=[5, 4], ): pass class ConfigTester(unittest.TestCase): def test_load_not_from_mixin(self): with self.assertRaises(ValueError): ConfigMixin.load_config("dummy_path") def test_register_to_config(self): obj = SampleObject() config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # init ignore private arguments obj = SampleObject(_name_or_path="lalala") config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # can override default obj = SampleObject(c=6) config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == 6 assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # can use positional arguments. 
obj = SampleObject(1, c=6) config = obj.config assert config["a"] == 1 assert config["b"] == 5 assert config["c"] == 6 assert config["d"] == "for diffusion" assert config["e"] == [1, 3] def test_save_load(self): obj = SampleObject() config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] with tempfile.TemporaryDirectory() as tmpdirname: obj.save_config(tmpdirname) new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname)) new_config = new_obj.config # unfreeze configs config = dict(config) new_config = dict(new_config) assert config.pop("c") == (2, 5) # instantiated as tuple assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json config.pop("_use_default_values") assert config == new_config def test_load_ddim_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: ddim = DDIMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert ddim.__class__ == DDIMScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_euler_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: euler = EulerDiscreteScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert euler.__class__ == EulerDiscreteScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_euler_ancestral_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: euler = EulerAncestralDiscreteScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert euler.__class__ == EulerAncestralDiscreteScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pndm = PNDMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert pndm.__class__ == PNDMScheduler # no warning should be thrown assert cap_logger.out == "" def test_overwrite_config_on_load(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: ddpm = DDPMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler", prediction_type="sample", beta_end=8, ) with CaptureLogger(logger) as cap_logger_2: ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88) assert ddpm.__class__ == DDPMScheduler assert ddpm.config.prediction_type == "sample" assert ddpm.config.beta_end == 8 assert ddpm_2.config.beta_start == 88 # no warning should be thrown assert cap_logger.out == "" assert cap_logger_2.out == "" def test_load_dpmsolver(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: dpm = DPMSolverMultistepScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert dpm.__class__ == DPMSolverMultistepScheduler # no warning should be thrown assert cap_logger.out 
== "" def test_use_default_values(self): # let's first save a config that should be in the form # a=2, # b=5, # c=(2, 5), # d="for diffusion", # e=[1, 3], config = SampleObject() config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")} # make sure that default config has all keys in `_use_default_values` assert set(config_dict.keys()) == set(config.config._use_default_values) with tempfile.TemporaryDirectory() as tmpdirname: config.save_config(tmpdirname) # now loading it with SampleObject2 should put f into `_use_default_values` config = SampleObject2.from_config(SampleObject2.load_config(tmpdirname)) assert "f" in config.config._use_default_values assert config.config.f == [1, 3] # now loading the config, should **NOT** use [1, 3] for `f`, but the default [1, 4] value # **BECAUSE** it is part of `config.config._use_default_values` new_config = SampleObject4.from_config(config.config) assert new_config.config.f == [5, 4] config.config._use_default_values.pop() new_config_2 = SampleObject4.from_config(config.config) assert new_config_2.config.f == [1, 3] # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5] assert new_config_2.config.e == [1, 3]
diffusers/tests/others/test_config.py/0
{ "file_path": "diffusers/tests/others/test_config.py", "repo_id": "diffusers", "token_count": 4006 }
135
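Beyond the fixtures above, the same `ConfigMixin`/`register_to_config` machinery backs every scheduler and model config. A minimal sketch of the save/load round-trip these tests exercise (class and parameter names are illustrative; note that tuples come back as lists after the JSON round-trip):

```python
import tempfile

from diffusers.configuration_utils import ConfigMixin, register_to_config


class MyComponent(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(self, a=2, c=(2, 5), d="for diffusion"):
        pass


obj = MyComponent(a=7)
print(obj.config.a, obj.config.d)  # -> 7 for diffusion

with tempfile.TemporaryDirectory() as tmpdir:
    obj.save_config(tmpdir)  # writes config.json into tmpdir
    restored = MyComponent.from_config(MyComponent.load_config(tmpdir))

print(restored.config.c)  # -> [2, 5], the tuple default comes back as a list
```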
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class FlaxControlNetPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_canny(self): controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16 ) pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 ) params["controlnet"] = controlnet_params prompts = "bird" num_samples = jax.device_count() prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) canny_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) rng = jax.random.PRNGKey(0) rng = jax.random.split(rng, jax.device_count()) p_params = replicate(params) prompt_ids = shard(prompt_ids) processed_image = shard(processed_image) images = pipe( prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2 def test_pose(self): controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16 ) pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 ) params["controlnet"] = controlnet_params prompts = "Chef in the kitchen" num_samples = jax.device_count() prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) pose_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) processed_image = pipe.prepare_image_inputs([pose_image] * num_samples) rng = jax.random.PRNGKey(0) rng = jax.random.split(rng, jax.device_count()) p_params = replicate(params) prompt_ids = shard(prompt_ids) processed_image = shard(processed_image) images = pipe( prompt_ids=prompt_ids, image=processed_image, 
params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/controlnet/test_flax_controlnet.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_flax_controlnet.py", "repo_id": "diffusers", "token_count": 2141 }
136
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": 
image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22PriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"] test_xformers_attention = False def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) # override default test because no output_type "latent", use "pt" instead def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["num_inference_steps"] = 2 inputs["output_type"] = "pt" output = pipe(**inputs)[0] assert output.abs().sum() == 0
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py", "repo_id": "diffusers", "token_count": 4049 }
137
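A note for interactive debugging (not part of the original test file): the same tiny dummy components can be assembled into a runnable pipeline outside `unittest`. A minimal sketch, assuming the `Dummies` helper above is importable — the import path below is hypothetical and depends on your working directory / package layout:

```python
# Hypothetical import path; adjust to wherever this test module lives on your PYTHONPATH.
from tests.pipelines.kandinsky2_2.test_kandinsky_prior import Dummies

from diffusers import KandinskyV22PriorPipeline

dummies = Dummies()
pipe = KandinskyV22PriorPipeline(**dummies.get_dummy_components()).to("cpu")
pipe.set_progress_bar_config(disable=True)

# Same call signature as the fast test; the prior returns CLIP-sized image embeddings.
out = pipe(**dummies.get_dummy_inputs("cpu"))
print(out.image_embeds.shape)  # (1, 32) for the dummy PriorTransformer above
```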
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, StableCascadeDecoderPipeline from diffusers.models import StableCascadeUNet from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, load_pt, numpy_cosine_similarity_distance, require_torch_gpu, skip_mps, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableCascadeDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableCascadeDecoderPipeline params = ["prompt"] batch_params = ["image_embeddings", "prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, "out_channels": 4, "conditioning_dim": 128, "block_out_channels": [16, 32, 64, 128], "num_attention_heads": [-1, -1, 1, 2], "down_num_layers_per_block": [1, 1, 1, 1], "up_num_layers_per_block": [1, 1, 1, 1], "down_blocks_repeat_mappers": [1, 1, 1, 1], "up_blocks_repeat_mappers": [3, 3, 2, 2], "block_types_per_layer": [ ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], "switch_level": None, "clip_text_pooled_in_channels": 32, "dropout": [0.1, 0.1, 0.1, 0.1], } model = StableCascadeUNet(**model_kwargs) return model.eval() def get_dummy_components(self): decoder = self.dummy_decoder text_encoder = self.dummy_text_encoder 
tokenizer = self.dummy_tokenizer vqgan = self.dummy_vqgan scheduler = DDPMWuerstchenScheduler() components = { "decoder": decoder, "vqgan": vqgan, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "latent_dim_scale": 4.0, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeddings": torch.ones((1, 4, 4, 4), device=device), "prompt": "horse", "generator": generator, "guidance_scale": 2.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_decoder(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False) image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="fp16 not supported") def test_float16_inference(self): super().test_float16_inference() def test_stable_cascade_decoder_prompt_embeds(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image_embeddings = inputs["image_embeddings"] prompt = "A photograph of a shiba inu, wearing a hat" ( prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled, ) = pipe.encode_prompt(device, 1, 1, False, prompt=prompt) generator = torch.Generator(device=device) decoder_output_prompt = pipe( image_embeddings=image_embeddings, prompt=prompt, num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) decoder_output_prompt_embeds = pipe( image_embeddings=image_embeddings, prompt=None, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) assert np.abs(decoder_output_prompt.images - decoder_output_prompt_embeds.images).max() < 1e-5 def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) prior_num_images_per_prompt = 2 decoder_num_images_per_prompt = 2 prompt = ["a cat"] batch_size = len(prompt) generator = torch.Generator(device) image_embeddings = randn_tensor( (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) ) decoder_output = pipe( image_embeddings=image_embeddings, 
prompt=prompt, num_inference_steps=1, output_type="np", guidance_scale=0.0, generator=generator.manual_seed(0), num_images_per_prompt=decoder_num_images_per_prompt, ) assert decoder_output.images.shape[0] == ( batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt ) def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings_with_guidance(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) prior_num_images_per_prompt = 2 decoder_num_images_per_prompt = 2 prompt = ["a cat"] batch_size = len(prompt) generator = torch.Generator(device) image_embeddings = randn_tensor( (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) ) decoder_output = pipe( image_embeddings=image_embeddings, prompt=prompt, num_inference_steps=1, output_type="np", guidance_scale=2.0, generator=generator.manual_seed(0), num_images_per_prompt=decoder_num_images_per_prompt, ) assert decoder_output.images.shape[0] == ( batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt ) @slow @require_torch_gpu class StableCascadeDecoderPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_cascade_decoder(self): pipe = StableCascadeDecoderPipeline.from_pretrained( "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image_embedding = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/image_embedding.pt" ) image = pipe( prompt=prompt, image_embeddings=image_embedding, output_type="np", num_inference_steps=2, generator=generator, ).images[0] assert image.shape == (1024, 1024, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/stable_cascade_decoder_image.npy" ) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py/0
{ "file_path": "diffusers/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py", "repo_id": "diffusers", "token_count": 5520 }
138
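The two `single_prompt_multiple_image_embeddings` tests above pin down a simple contract: the decoder returns one image per (prompt, prior embedding, decoder sample) combination. A tiny sketch of that arithmetic, using the same numbers as the tests (illustrative only, no pipeline needed):

```python
prompt_batch = 1                   # len(["a cat"]) in the tests above
prior_num_images_per_prompt = 2    # image embeddings the prior produced per prompt
decoder_num_images_per_prompt = 2  # images the decoder draws per embedding

expected_images = prompt_batch * prior_num_images_per_prompt * decoder_num_images_per_prompt
print(expected_images)  # 4 == decoder_output.images.shape[0] asserted in both tests
```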
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import nightly, require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @nightly @require_flax class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_flax(self): sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, ) prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2 @nightly @require_flax class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_dpm_flax(self): model_id = "stabilityai/stable-diffusion-2" scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, ) params["scheduler"] = scheduler_params prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 
1e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py", "repo_id": "diffusers", "token_count": 1712 }
139
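The Flax tests above rely on the usual per-device data layout: parameters are replicated across devices while inputs and PRNG keys get a leading device axis. A minimal sketch of the key/prompt preparation only (assumes JAX and Flax are installed; no model download needed):

```python
import jax

num_devices = jax.device_count()

# One prompt per device; sd_pipe.prepare_inputs would tokenize these into prompt_ids,
# and shard(prompt_ids) would reshape them to a leading (num_devices, ...) axis.
prompts = num_devices * ["A painting of a squirrel eating a burger"]

# One PRNG key per device, exactly as in the tests above.
prng_seed = jax.random.split(jax.random.PRNGKey(0), num_devices)
print(num_devices, prng_seed.shape)  # e.g. 8 and (8, 2) on a TPU v3-8
```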
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class StableUnCLIPPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableUnCLIPPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false test_xformers_attention = False def get_dummy_components(self): embedder_hidden_size = 32 embedder_projection_dim = embedder_hidden_size # prior components torch.manual_seed(0) prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) prior_text_encoder = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0) prior = PriorTransformer( num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, ) torch.manual_seed(0) prior_scheduler = DDPMScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", ) # regular denoising components torch.manual_seed(0) image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size) image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2") torch.manual_seed(0) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) text_encoder = CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", # The class embeddings are the noise augmented image embeddings. # I.e. 
the image embeddings concated with the noised embeddings of the same dimension projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, ) torch.manual_seed(0) vae = AutoencoderKL() components = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", } return inputs # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU undeterminism requires a looser check. def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @nightly @require_torch_gpu class StableUnCLIPPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_unclip(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe("anime turle", generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py/0
{ "file_path": "diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py", "repo_id": "diffusers", "token_count": 3989 }
140
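The sequential-CPU-offload test above uses a peak-memory measurement pattern that is handy on its own when profiling pipelines. A sketch, assuming a CUDA device is available (the offloaded pipeline call itself is elided):

```python
import torch

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()

# ... run the pipeline here, e.g. after pipe.enable_attention_slicing()
# and pipe.enable_sequential_cpu_offload() ...

mem_bytes = torch.cuda.max_memory_allocated()
print(f"peak allocation: {mem_bytes / 1e9:.2f} GB")  # the test above asserts < 7 GB
```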
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class PNDMSchedulerTest(SchedulerCommonTest): scheduler_classes = (PNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.ets = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = 
self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.prk_timesteps): residual = model(sample, t) sample = scheduler.step_prk(residual, t, sample).prev_sample for i, t in enumerate(scheduler.plms_timesteps): residual = model(sample, t) sample = scheduler.step_plms(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] scheduler.ets = dummy_past_residuals[:] output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps, torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ), ) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [1, 5, 10]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps) def test_pow_of_3_inference_steps(self): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 num_inference_steps = 27 for scheduler_class in self.scheduler_classes: sample = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): sample = scheduler.step_prk(residual, t, sample).prev_sample def test_inference_plms_no_past_residuals(self): with self.assertRaises(ValueError): scheduler_class = 
self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 198.1318) < 1e-2 assert abs(result_mean.item() - 0.2580) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 67.3986) < 1e-2 assert abs(result_mean.item() - 0.0878) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 230.0399) < 1e-2 assert abs(result_mean.item() - 0.2995) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 186.9482) < 1e-2 assert abs(result_mean.item() - 0.2434) < 1e-3
diffusers/tests/schedulers/test_scheduler_pndm.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_pndm.py", "repo_id": "diffusers", "token_count": 4654 }
141
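The `full_loop` helper above exercises PNDM's two-phase sampling: Runge-Kutta warm-up steps (`step_prk`) that populate the residual history, followed by linear multistep steps (`step_plms`). A standalone sketch with a toy stand-in model, so it runs without any pretrained weights:

```python
import torch

from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)


def toy_model(x, t):
    # Stand-in for a real noise-prediction UNet; only shape compatibility matters here.
    return 0.1 * x


for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up phase
    sample = scheduler.step_prk(toy_model(sample, t), t, sample).prev_sample

for t in scheduler.plms_timesteps:  # linear multistep phase
    sample = scheduler.step_plms(toy_model(sample, t), t, sample).prev_sample

print(sample.shape)  # stays (1, 3, 8, 8)
```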
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests
from packaging.version import parse


# GitHub repository details
USER = "huggingface"
REPO = "diffusers"


def fetch_all_branches(user, repo):
    branches = []  # List to store all branches
    page = 1  # Start from first page

    while True:
        # Make a request to the GitHub API for the branches
        response = requests.get(f"https://api.github.com/repos/{user}/{repo}/branches", params={"page": page})

        # Check if the request was successful
        if response.status_code == 200:
            # Add the branches from the current page to the list
            branches.extend([branch["name"] for branch in response.json()])

            # Check if there is a 'next' link for pagination
            if "next" in response.links:
                page += 1  # Move to the next page
            else:
                break  # Exit loop if there is no next page
        else:
            print("Failed to retrieve branches:", response.status_code)
            break

    return branches


def main():
    # Fetch all branches
    branches = fetch_all_branches(USER, REPO)

    # Filter branches.
    # print(f"Total branches: {len(branches)}")
    filtered_branches = []
    for branch in branches:
        if branch.startswith("v") and ("-release" in branch or "-patch" in branch):
            filtered_branches.append(branch)
            # print(f"Filtered: {branch}")

    sorted_branches = sorted(filtered_branches, key=lambda x: parse(x.split("-")[0][1:]), reverse=True)
    latest_branch = sorted_branches[0]
    # print(f"Latest branch: {latest_branch}")
    return latest_branch


if __name__ == "__main__":
    print(main())
diffusers/utils/fetch_latest_release_branch.py/0
{ "file_path": "diffusers/utils/fetch_latest_release_branch.py", "repo_id": "diffusers", "token_count": 824 }
142
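A note on the design choice in the script above: branches are sorted with `packaging.version.parse` rather than plain string comparison, because lexicographic order mis-ranks multi-digit versions. A self-contained check:

```python
from packaging.version import parse

names = ["v0.9.0-release", "v0.10.0-release", "v0.27.0-patch"]

latest = sorted(names, key=lambda x: parse(x.split("-")[0][1:]), reverse=True)[0]
print(latest)                          # v0.27.0-patch
print(sorted(names, reverse=True)[0])  # v0.9.0-release -- what a naive string sort would pick
```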
<jupyter_start><jupyter_text>*FineTuning* et guidageDans ce *notebook*, nous allons couvrir deux approches principales pour adapter les modรจles de diffusion existants :* Avec le *finetuning*, nous entraรฎnons de nouveau les modรจles existants sur de nouvelles donnรฉes dans le but de modifier le rรฉsultat qu'ils produisent.* Avec **guidage**, nous prenons un modรจle existant et dirigeons le processus de gรฉnรฉration au moment de l'infรฉrence pour un contrรดle supplรฉmentaire. Ce que vous apprendrez :A la fin de ce *notebook*, vous saurez comment :- Crรฉer une boucle d'รฉchantillonnage et gรฉnรฉrer des รฉchantillons plus rapidement ร  l'aide d'un nouveau planificateur- *Finetuner* un modรจle de diffusion existant sur de nouvelles donnรฉes, y compris : - Utiliser l'accumulation du gradient pour contourner certains des problรจmes liรฉs aux petits batchs. - Enregistrer les รฉchantillons dans [Weights and Biases] (https://wandb.ai/site) pendant l'entraรฎnement pour suivre la progression (via le script d'exemple joint). - Sauvegarder le pipeline rรฉsultant et le tรฉlรฉcharger sur le Hub- Guider le processus d'รฉchantillonnage avec des fonctions de perte supplรฉmentaires pour ajouter un contrรดle sur les modรจles existants, y compris : - Explorer diffรฉrentes approches de guidage avec une simple perte basรฉe sur la couleur - Utiliser CLIP pour guider la gรฉnรฉration ร  l'aide d'un prompt de texte - Partager une boucle d'รฉchantillonnage personnalisรฉe en utilisant Gradio et ๐Ÿค— Spaces.โ“ Si vous avez des questions, merci de les poster sur le canal `diffusion-models-class` du [serveur Discord d'Hugging Face](https://huggingface.co/join/discord). Configuration et importationsPour enregistrer vos modรจles *finetunรฉs* sur le Hub d'Hugging Face, vous devrez vous connecter avec un *token* qui a un accรจs en รฉcriture. Le code ci-dessous vous invite ร  le faire et vous renvoie ร  la page des *tokens* de votre compte. Vous aurez รฉgalement besoin d'un compte Weights and Biases si vous souhaitez utiliser le script d'entraรฎnement pour enregistrer des รฉchantillons au fur et ร  mesure que le modรจle s'entraรฎne. 
Lร  encore, le code devrait vous inviter ร  vous connecter lร  oรน c'est nรฉcessaire.A part cela, la seule chose ร  faire est d'installer quelques dรฉpendances, d'importer tout ce dont nous aurons besoin et de spรฉcifier l'appareil que nous utiliserons :<jupyter_code>!pip install -qq diffusers datasets accelerate wandb open-clip-torch # Code pour se connecter au Hub d'Hugging Face, nรฉcessaire pour partager les modรจles # Assurez-vous d'utiliser un *token* avec un accรจs WRITE (รฉcriture) from huggingface_hub import notebook_login notebook_login() import numpy as np import torch import torch.nn.functional as F import torchvision from datasets import load_dataset from diffusers import DDIMScheduler, DDPMPipeline from matplotlib import pyplot as plt from PIL import Image from torchvision import transforms from tqdm.auto import tqdm device = ( "mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu" )<jupyter_output><empty_output><jupyter_text>Chargement d'un pipeline prรฉ-entraรฎnรฉPour commencer ce *notebook*, chargeons un pipeline existant et voyons ce que nous pouvons en faire :<jupyter_code>image_pipe = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256") image_pipe.to(device);<jupyter_output><empty_output><jupyter_text>La gรฉnรฉration d'images est aussi simple que l'exรฉcution de la mรฉthode [`__call__`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddpm/pipeline_ddpm.pyL42) du pipeline en l'appelant comme une fonction :<jupyter_code>images = image_pipe().images images[0]<jupyter_output><empty_output><jupyter_text>Sympathique, mais LENT ! Avant d'aborder les sujets principaux du jour, jetons un coup d'ล“il ร  la boucle d'รฉchantillonnage proprement dite et voyons comment nous pouvons utiliser un รฉchantillonneur plus sophistiquรฉ pour l'accรฉlรฉrer. ร‰chantillonnage plus rapide avec DDIMร€ chaque รฉtape, le modรจle est nourri d'une entrรฉe bruyante et il lui est demandรฉ de prรฉdire le bruit (et donc une estimation de ce ร  quoi l'image entiรจrement dรฉbruitรฉe pourrait ressembler). Au dรฉpart, ces prรฉdictions ne sont pas trรจs bonnes, c'est pourquoi nous dรฉcomposons le processus en plusieurs รฉtapes. Cependant, l'utilisation de plus de 1000 รฉtapes s'est avรฉrรฉe inutile, et une multitude de recherches rรฉcentes ont explorรฉ la maniรจre d'obtenir de bons รฉchantillons avec le moins d'รฉtapes possible. Dans la bibliothรจque ๐Ÿค— *Diffusers*, ces mรฉthodes d'รฉchantillonnage sont gรฉrรฉes par un planificateur, qui doit effectuer chaque mise ร  jour via la fonction `step()`. Pour gรฉnรฉrer une image, on commence par un bruit alรฉatoire $x$. Ensuite, pour chaque pas de temps dans le planificateur de bruit, nous introduisons l'entrรฉe bruitรฉe $x$ dans le modรจle et transmettons la prรฉdiction rรฉsultante ร  la fonction `step()`. Celle-ci renvoie une sortie avec un attribut `prev_sample`. "previous" parce que nous revenons en arriรจre dans le temps, d'un niveau de bruit รฉlevรฉ ร  un niveau de bruit faible (ร  l'inverse du processus de diffusion vers l'avant).Voyons cela en action ! 
Tout d'abord, nous chargeons un planificateur, ici un `DDIMScheduler` basรฉ sur le papier [*Denoising Diffusion Implicit Models*](https://arxiv.org/abs/2010.02502) qui peut donner des รฉchantillons dรฉcents en beaucoup moins d'รฉtapes que l'implรฉmentation originale du DDPM :<jupyter_code># Crรฉer un nouveau planificateur et dรฉfinir le nombre d'รฉtapes d'infรฉrence scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256") scheduler.set_timesteps(num_inference_steps=40)<jupyter_output><empty_output><jupyter_text>Vous pouvez constater que ce modรจle effectue 40 รฉtapes au total, chaque saut รฉquivalant ร  25 รฉtapes du programme original de 1000 รฉtapes :<jupyter_code>scheduler.timesteps<jupyter_output><empty_output><jupyter_text>Crรฉons 4 images alรฉatoires et exรฉcutons la boucle d'รฉchantillonnage, en visualisant ร  la fois le $x$ actuel et la version dรฉbruitรฉe prรฉdite au fur et ร  mesure de l'avancement du processus :<jupyter_code># Le point de dรฉpart alรฉatoire x = torch.randn(4, 3, 256, 256).to(device) # Batch de 4 images ร  3 canaux de 256 x 256 px # Boucle sur les pas de temps d'รฉchantillonnage for i, t in tqdm(enumerate(scheduler.timesteps)): # Prรฉparer l'entrรฉe du modรจle model_input = scheduler.scale_model_input(x, t) # Obtenir la prรฉdiction with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] # Calculer la forme que devrait prendre l'รฉchantillon mis ร  jour ร  l'aide du planificateur scheduler_output = scheduler.step(noise_pred, t, x) # Mise ร  jour de x x = scheduler_output.prev_sample # Occasionnellement, afficher ร  la fois x et les images dรฉbruitรฉes prรฉdites if i % 10 == 0 or i == len(scheduler.timesteps) - 1: fig, axs = plt.subplots(1, 2, figsize=(12, 5)) grid = torchvision.utils.make_grid(x, nrow=4).permute(1, 2, 0) axs[0].imshow(grid.cpu().clip(-1, 1) * 0.5 + 0.5) axs[0].set_title(f"Current x (step {i})") pred_x0 = ( scheduler_output.pred_original_sample ) # Non disponible pour tous les planificateurs grid = torchvision.utils.make_grid(pred_x0, nrow=4).permute(1, 2, 0) axs[1].imshow(grid.cpu().clip(-1, 1) * 0.5 + 0.5) axs[1].set_title(f"Predicted denoised images (step {i})") plt.show()<jupyter_output><empty_output><jupyter_text>Comme vous pouvez le voir, les prรฉdictions initiales ne sont pas trรจs bonnes, mais au fur et ร  mesure que le processus se poursuit, les rรฉsultats prรฉdits deviennent de plus en plus prรฉcis. Si vous รชtes curieux de savoir ce qui se passe ร  l'intรฉrieur de la fonction `step()`, inspectez le code (bien commentรฉ) avec :<jupyter_code># ??scheduler.step<jupyter_output><empty_output><jupyter_text>Vous pouvez รฉgalement insรฉrer ce nouveau planificateur ร  la place du planificateur original fourni avec le pipeline, et รฉchantillonner de la maniรจre suivante :<jupyter_code>image_pipe.scheduler = scheduler images = image_pipe(num_inference_steps=40).images images[0]<jupyter_output><empty_output><jupyter_text>Trรจs bien, nous pouvons maintenant obtenir des รฉchantillons dans un dรฉlai raisonnable ! Cela devrait accรฉlรฉrer les choses au fur et ร  mesure que nous avanรงons dans le reste de ce *notebook* :) FinetuningEt maintenant, le plus amusant ! ร‰tant donnรฉ ce pipeline prรฉ-entraรฎnรฉ, comment pouvons-nous rรฉentraรฎner le modรจle pour gรฉnรฉrer des images sur la base de nouvelles donnรฉes d'entraรฎnement ?Il s'avรจre que cela est presque identique ร  entraรฎner un modรจle ร  partir de zรฉro (comme nous l'avons vu dans l'unitรฉ 1), sauf que nous commenรงons avec le modรจle existant. 
Voyons cela en action et abordons quelques considรฉrations supplรฉmentaires au fur et ร  mesure. Tout d'abord, le jeu de donnรฉes : vous pouvez essayer ce [jeu de donnรฉes de visages vintage](https://huggingface.co/datasets/Norod78/Vintage-Faces-FFHQAligned) ou ces [visages animรฉs](https://huggingface.co/datasets/huggan/anime-faces) pour quelque chose de plus proche des donnรฉes d'entraรฎnement originales de ce modรจle de visages. Mais pour le plaisir, utilisons plutรดt le mรชme petit jeu de donnรฉes de papillons que nous avons utilisรฉ pour nous entraรฎner ร  partir de zรฉro dans l'unitรฉ 1. Exรฉcutez le code ci-dessous pour tรฉlรฉcharger le jeu de donnรฉes papillons et crรฉer un chargeur de donnรฉes ร  partir duquel nous pouvons รฉchantillonner un batch d'images :<jupyter_code># Pas sur Colab ? Les commentaires avec #@ permettent de modifier l'interface utilisateur comme les titres ou les entrรฉes # mais peuvent รชtre ignorรฉs si vous travaillez sur une plateforme diffรฉrente. dataset_name = "huggan/smithsonian_butterflies_subset" # @param dataset = load_dataset(dataset_name, split="train") image_size = 256 # @param batch_size = 4 # @param preprocess = transforms.Compose( [ transforms.Resize((image_size, image_size)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def transform(examples): images = [preprocess(image.convert("RGB")) for image in examples["image"]] return {"images": images} dataset.set_transform(transform) train_dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=True ) print("Previewing batch:") batch = next(iter(train_dataloader)) grid = torchvision.utils.make_grid(batch["images"], nrow=4) plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);<jupyter_output>Using custom data configuration huggan--smithsonian_butterflies_subset-7665b1021a37404c Found cached dataset parquet (/home/lewis_huggingface_co/.cache/huggingface/datasets/huggan___parquet/huggan--smithsonian_butterflies_subset-7665b1021a37404c/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>**Considรฉration 1 :** notre taille de batch ici (4) est assez petite, puisque nous entraรฎnons sur une grande taille d'image (256 pixels) en utilisant un modรจle assez grand et que nous manquerons de RAM du GPU si nous augmentons trop la taille du batch. Vous pouvez rรฉduire la taille de l'image pour accรฉlรฉrer les choses et permettre des batchs plus importants, mais ces modรจles ont รฉtรฉ conรงus et entraรฎnรฉs ร  l'origine pour une gรฉnรฉration de 256 pixels. Passons maintenant ร  la boucle d'entraรฎnement. Nous allons mettre ร  jour les poids du modรจle prรฉ-entraรฎnรฉ en fixant la cible d'optimisation ร  `image_pipe.unet.parameters()`. Le reste est presque identique ร  l'exemple de boucle d'entraรฎnement de l'unitรฉ 1. 
Cela prend environ 10 minutes ร  exรฉcuter sur Colab, c'est donc le bon moment pour prendre un cafรฉ ou un thรฉ pendant que vous attendez :<jupyter_code>num_epochs = 2 # @param lr = 1e-5 # 2param grad_accumulation_steps = 2 # @param optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr) losses = [] for epoch in range(num_epochs): for step, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader)): clean_images = batch["images"].to(device) # bruit ร  ajouter aux images noise = torch.randn(clean_images.shape).to(clean_images.device) bs = clean_images.shape[0] # un pas de temps alรฉatoire pour chaque image timesteps = torch.randint( 0, image_pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device, ).long() # Ajouter du bruit aux images propres en fonction de la magnitude du bruit ร  chaque pas de temps # (il s'agit du processus de diffusion vers l'avant) noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps) # Obtenir la prรฉdiction du modรจle pour le bruit noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Comparez la prรฉdiction avec le bruit rรฉel : loss = F.mse_loss( noise_pred, noise ) # NB : essayer de prรฉdire le bruit (eps) pas (noisy_ims-clean_ims) ou juste (clean_ims) # Stocker pour un plot ultรฉrieur losses.append(loss.item()) # Mettre ร  jour les paramรจtres du modรจle avec l'optimiseur sur la base de cette perte loss.backward(loss) # Accumulation des gradients if (step + 1) % grad_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() print( f"Epoch {epoch} average loss: {sum(losses[-len(train_dataloader):])/len(train_dataloader)}" ) # Tracer la courbe de perte : plt.plot(losses)<jupyter_output><empty_output><jupyter_text>**Considรฉration 2 :** notre signal de perte est extrรชmement bruyant, puisque nous ne travaillons qu'avec quatre exemples ร  des niveaux de bruit alรฉatoires pour chaque รฉtape. Ce n'est pas idรฉal pour l'entraรฎnement. Une solution consiste ร  utiliser un taux d'apprentissage extrรชmement faible pour limiter la taille de la mise ร  jour ร  chaque รฉtape. Ce serait encore mieux si nous pouvions trouver un moyen d'obtenir les mรชmes avantages qu'en utilisant une taille de batch plus importante sans que les besoins en mรฉmoire ne montent en flรจche...Entrez dans [l'accumulation des gradients](https://kozodoi.me/python/deep%20learning/pytorch/tutorial/2021/02/19/gradient-accumulation.html:~:text=Simplement%20speaking%2C%20gradient%20accumulation%20means,may%20find%20 this%20tutorial%20use.). Si nous appelons `loss.backward()` plusieurs fois avant d'exรฉcuter `optimizer.step()` et `optimizer.zero_grad()`, PyTorch accumule (somme) les gradients, fusionnant effectivement le signal de plusieurs batchs pour donner une seule (meilleure) estimation qui est ensuite utilisรฉe pour mettre ร  jour les paramรจtres. Il en rรฉsulte moins de mises ร  jour totales, tout comme nous le verrions si nous utilisions une taille de batch plus importante. C'est quelque chose que de nombreux *frameworks* gรจrent pour vous (par exemple, ๐Ÿค— [Accelerate rend cela facile](https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation)), mais il est agrรฉable de le voir mis en ล“uvre ร  partir de zรฉro car il s'agit d'une technique utile pour traiter l'entraรฎnement sous les contraintes de mรฉmoire du GPU ! Comme vous pouvez le voir dans le code ci-dessus (aprรจs le commentaire Gradient accumulation), il n'y a pas vraiment besoin de beaucoup de code. 
**Exercice :** Voyez si vous pouvez ajouter l'accumulation des gradients ร  la boucle d'entraรฎnement de l'unitรฉ 1.Comment se comporte-t-elle ? Rรฉflรฉchissez ร  la maniรจre dont vous pourriez ajuster le taux d'apprentissage en fonction du nombre d'รฉtapes d'accumulation des gradients ; devrait-il rester identique ร  auparavant ? **Considรฉration 3 :** Cela prend encore beaucoup de temps, et afficher une mise ร  jour d'une ligne ร  chaque รฉpoque n'est pas suffisant pour nous donner une bonne idรฉe de ce qui se passe. Nous devrions probablement :- Gรฉnรฉrer quelques รฉchantillons de temps en temps pour examiner visuellement la performance qualitativement au fur et ร  mesure que le modรจle s'entraรฎne.- Enregistrer des รฉlรฉments tels que la perte et les gรฉnรฉrations d'รฉchantillons pendant l'entraรฎnement, peut-รชtre en utilisant quelque chose comme Weights and Biases ou Tensorboard.Nous avons crรฉรฉ un script rapide (finetune_model.py) qui reprend le code d'entraรฎnement ci-dessus et y ajoute une fonctionnalitรฉ minimale de *logging*. Vous pouvez voir les [logs d'un entraรฎnement ci-dessous](https://wandb.ai/johnowhitaker/dm_finetune/runs/2upaa341) :<jupyter_code>%wandb johnowhitaker/dm_finetune/2upaa341 # Vous aurez besoin d'un compte W&B pour que cela fonctionne - sautez si vous ne voulez pas vous connecter.<jupyter_output><empty_output><jupyter_text>Il est amusant de voir comment les รฉchantillons gรฉnรฉrรฉs changent au fur et ร  mesure que l'entraรฎnement progresse. Mรชme si la perte ne semble pas s'amรฉliorer beaucoup, on peut voir une progression du domaine original (images de chambres ร  coucher) vers les nouvelles donnรฉes d'entraรฎnement (wikiart). A la fin de ce *notebook* se trouve un code commentรฉ pour *finetunรฉ* un modรจle en utilisant ce script comme alternative ร  l'exรฉcution de la cellule ci-dessus. **Exercice :** Voyez si vous pouvez modifier l'exemple officiel de script d'entraรฎnement que nous avons vu dans l'unitรฉ 1 pour commencer avec un modรจle prรฉ-entraรฎnรฉ plutรดt que d'entraรฎner ร  partir de zรฉro.Comparez-le au script minimal dont le lien figure ci-dessus ; quelles sont les fonctionnalitรฉs supplรฉmentaires qui manquent au script minimal ? En gรฉnรฉrant quelques images avec ce modรจle, nous pouvons voir que ces visages ont dรฉjร  l'air trรจs รฉtranges !<jupyter_code>x = torch.randn(8, 3, 256, 256).to(device) # Batch de 8 for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);<jupyter_output><empty_output><jupyter_text>**Considรฉration 4 :** Le *finetuning* peut รชtre tout ร  fait imprรฉvisible ! Si nous entraรฎnions plus longtemps, nous pourrions voir des papillons parfaits. Mais les รฉtapes intermรฉdiaires peuvent รชtre extrรชmement intรฉressantes en elles-mรชmes, surtout si vos intรฉrรชts sont plutรดt artistiques ! Entraรฎnez sur des pรฉriodes trรจs courtes ou trรจs longues et faites varier le taux d'apprentissage pour voir comment cela affecte les types de rรฉsultats produits par le modรจle final. 
Code pour *finetuner* un modรจle en utilisant le script d'exemple minimal que nous avons utilisรฉ sur le modรจle de dรฉmonstration WikiArtSi vous souhaitez entraรฎner un modรจle similaire ร  celui que nous avons crรฉรฉ sur WikiArt, vous pouvez dรฉcommenter et exรฉcuter les cellules ci-dessous. Comme cela prend un certain temps et peut รฉpuiser la mรฉmoire de votre GPU, nous vous conseillons de le faire aprรจs avoir parcouru le reste de ce *notebook*.<jupyter_code>## Pour tรฉlรฉcharger le script de finetuning : # !wget https://github.com/huggingface/diffusion-models-class/raw/main/unit2/finetune_model.py ## Pour exรฉcuter le script, entraรฎnant le modรจle de visage sur des visages vintage ## (l'idรฉal est d'exรฉcuter ce script dans un terminal) : # !python finetune_model.py --image_size 128 --batch_size 8 --num_epochs 16\ # --grad_accumulation_steps 2 --start_model "google/ddpm-celebahq-256"\ # --dataset_name "Norod78/Vintage-Faces-FFHQAligned" --wandb_project 'dm-finetune'\ # --log_samples_every 100 --save_model_every 1000 --model_save_name 'vintageface'<jupyter_output><empty_output><jupyter_text>Sauvegarde et chargement des pipelines *finetunรฉs*Maintenant que nous avons *finetunรฉ* le UNet dans notre modรจle de diffusion, sauvegardons-le dans un dossier local en exรฉcutant :<jupyter_code>image_pipe.save_pretrained("my-finetuned-model")<jupyter_output><empty_output><jupyter_text>Comme nous l'avons vu dans l'unitรฉ 1, cela permet de sauvegarder la configuration, le modรจle et le planificateur :<jupyter_code>!ls {"my-finetuned-model"}<jupyter_output>model_index.json scheduler unet<jupyter_text>Ensuite, vous pouvez suivre les mรชmes รฉtapes que celles dรฉcrites dans le *notebook* d'introduction ร  *Diffusers* de l'unitรฉ 1 pour pousser le modรจle vers le Hub en vue d'une utilisation ultรฉrieure :<jupyter_code># Code pour tรฉlรฉcharger un pipeline sauvegardรฉ localement vers le Hub from huggingface_hub import HfApi, ModelCard, create_repo, get_full_repo_name # Mise en place du repo et tรฉlรฉchargement des fichiers model_name = "ddpm-celebahq-finetuned-butterflies-2epochs" # @param Le nom que vous souhaitez lui donner sur le Hub local_folder_name = "my-finetuned-model" # @param Crรฉรฉ par le script ou par vous via image_pipe.save_pretrained('save_name') description = "Describe your model here" # @param hub_model_id = get_full_repo_name(model_name) create_repo(hub_model_id) api = HfApi() api.upload_folder( folder_path=f"{local_folder_name}/scheduler", path_in_repo="", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{local_folder_name}/unet", path_in_repo="", repo_id=hub_model_id ) api.upload_file( path_or_fileobj=f"{local_folder_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) # Ajouter une carte modรจle (facultatif mais sympa !) 
content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class ๐Ÿงจ](https://github.com/huggingface/diffusion-models-class) {description} ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('{hub_model_id}') image = pipeline().images[0] image ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output><jupyter_text>Fรฉlicitations, vous avez maintenant *finetunรฉ* votre premier modรจle de diffusion !Pour le reste de ce notebook, nous utiliserons un [modรจle](https://huggingface.co/johnowhitaker/sd-class-wikiart-from-bedrooms) que nous avons *finetunรฉ* ร  partir d'un modรจle entraรฎnรฉ sur [LSUN bedrooms](https://huggingface.co/google/ddpm-bedroom-256) environ une fois sur le [WikiArt dataset](https://huggingface.co/datasets/huggan/wikiart). Si vous prรฉfรฉrez, vous pouvez sauter cette cellule et utiliser le pipeline faces/butterflies que nous avons *finetunรฉ* dans la section prรฉcรฉdente ou en charger un depuis le Hub ร  la place :<jupyter_code># Chargement du pipeline prรฉ-entraรฎnรฉ pipeline_name = "johnowhitaker/sd-class-wikiart-from-bedrooms" image_pipe = DDPMPipeline.from_pretrained(pipeline_name).to(device) # ร‰chantillon d'images avec un planificateur DDIM sur 40 รฉtapes scheduler = DDIMScheduler.from_pretrained(pipeline_name) scheduler.set_timesteps(num_inference_steps=40) # Point de dรฉpart alรฉatoire (batch de 8 images) x = torch.randn(8, 3, 256, 256).to(device) # Boucle d'รฉchantillonnage minimale for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = scheduler.step(noise_pred, t, x).prev_sample # Voir les rรฉsultats grid = torchvision.utils.make_grid(x, nrow=4) plt.imshow(grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5);<jupyter_output><empty_output><jupyter_text>**Considรฉration 5** : Il est souvent difficile de savoir si le *finetunรฉ* fonctionne bien, et ce que l'on entend par "bonnes performances" peut varier selon le cas d'utilisation. Par exemple, si vous *finetunรฉ* un modรจle conditionnรฉ par du texte comme Stable Diffusion sur un petit jeu de donnรฉes, vous voudrez probablement qu'il conserve la plus grande partie de son apprentissage original afin de pouvoir comprendre des prompts arbitraires non couverts par votre nouveau jeu de donnรฉes, tout en s'adaptant pour mieux correspondre au style de vos nouvelles donnรฉes d'entraรฎnement. Cela pourrait signifier l'utilisation d'un faible taux d'apprentissage avec quelque chose comme la moyenne exponentielle du modรจle, comme dรฉmontrรฉ dans cet excellent [article de blog](https://lambdalabs.com/blog/how-to-fine-tune-stable-diffusion-how-we-made-the-text-to-pokemon-model-at-lambda) sur la crรฉation d'une version Pokemon de Stable Diffusion. Dans une autre situation, vous pouvez vouloir rรฉ-entraรฎner complรจtement un modรจle sur de nouvelles donnรฉes (comme notre exemple chambre -> wikiart), auquel cas un taux d'apprentissage plus รฉlevรฉ et un entraรฎnement plus poussรฉ s'avรจrent judicieux. 
Mรชme si le [graphique de la perte] (https://wandb.ai/johnowhitaker/dm_finetune/runs/2upaa341) ne montre pas beaucoup d'amรฉlioration, les รฉchantillons s'รฉloignent clairement des donnรฉes d'origine et s'orientent vers des rรฉsultats plus "artistiques", bien qu'ils restent pour la plupart incohรฉrents.Ce qui nous amรจne ร  la section suivante, oรน nous examinons comment nous pourrions ajouter des conseils supplรฉmentaires ร  un tel modรจle pour mieux contrรดler les rรฉsultats. GuidageQue faire si l'on souhaite exercer un certain contrรดle sur les รฉchantillons gรฉnรฉrรฉs ? Par exemple, supposons que nous voulions biaiser les images gรฉnรฉrรฉes pour qu'elles soient d'une couleur spรฉcifique. Comment procรฉder ? C'est lร  qu'intervient le guidage, une technique qui permet d'ajouter un contrรดle supplรฉmentaire au processus d'รฉchantillonnage. La premiรจre รฉtape consiste ร  crรฉer notre fonction de conditionnement : une mesure (perte) que nous souhaitons minimiser. En voici une pour l'exemple de la couleur, qui compare les pixels d'une image ร  une couleur cible (par dรฉfaut, une sorte de sarcelle claire) et renvoie l'erreur moyenne :<jupyter_code>def color_loss(images, target_color=(0.1, 0.9, 0.5)): """ร‰tant donnรฉ une couleur cible (R, G, B), retourner une perte correspondant ร  la distance moyenne entre les pixels de l'image et cette couleur. Par dรฉfaut, il s'agit d'une couleur sarcelle claire : (0.1, 0.9, 0.5)""" target = ( torch.tensor(target_color).to(images.device) * 2 - 1 ) # Map target color to (-1, 1) target = target[ None, :, None, None ] # Obtenir la forme nรฉcessaire pour fonctionner avec les images (b, c, h, w) error = torch.abs( images - target ).mean() # Diffรฉrence absolue moyenne entre les pixels de l'image et la couleur cible return error<jupyter_output><empty_output><jupyter_text>Ensuite, nous allons crรฉer une version modifiรฉe de la boucle d'รฉchantillonnage oรน, ร  chaque รฉtape, nous ferons ce qui suit :- Crรฉer une nouvelle version de `x` avec `requires_grad = True`- Calculer la version dรฉbruitรฉe (`x0`)- Introduire la version prรฉdite `x0` dans notre fonction de perte- Trouver le gradient de cette fonction de perte par rapport ร  `x`- Utiliser ce gradient de conditionnement pour modifier `x` avant d'utiliser le planificateur, en espรฉrant pousser x dans une direction qui conduira ร  une perte plus faible selon notre fonction d'orientation.Il existe deux variantes que vous pouvez explorer. Dans la premiรจre, nous fixons `requires_grad` sur `x` aprรจs avoir obtenu notre prรฉdiction de bruit du UNet, ce qui est plus efficace en termes de mรฉmoire (puisque nous n'avons pas ร  retracer les gradients ร  travers le modรจle de diffusion), mais donne un gradient moins prรฉcis. 
Dans le second cas, nous dรฉfinissons d'abord `requires_grad` sur `x`, puis nous le faisons passer par l'unet et nous calculons le `x0` prรฉdit.<jupyter_code># Variante 1 : mรฉthode rapide # L'รฉchelle de guidance dรฉtermine l'intensitรฉ de l'effet guidance_loss_scale = 40 # Envisagez de modifier cette valeur ร  5, ou ร  100 x = torch.randn(8, 3, 256, 256).to(device) for i, t in tqdm(enumerate(scheduler.timesteps)): # Prรฉparer l'entrรฉe du modรจle model_input = scheduler.scale_model_input(x, t) # Prรฉdire le bruit rรฉsiduel with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] # Fixer x.requires_grad ร  True x = x.detach().requires_grad_() # Obtenir la valeur prรฉdite x0 x0 = scheduler.step(noise_pred, t, x).pred_original_sample # Calculer la perte loss = color_loss(x0) * guidance_loss_scale if i % 10 == 0: print(i, "loss:", loss.item()) # Obtenir le gradient cond_grad = -torch.autograd.grad(loss, x)[0] # Modifier x en fonction de ce gradient x = x.detach() + cond_grad # Le planificateur x = scheduler.step(noise_pred, t, x).prev_sample # Voir le rรฉsultat grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 Image.fromarray(np.array(im * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>Cette deuxiรจme option nรฉcessite presque le double de RAM GPU pour fonctionner, mรชme si nous ne gรฉnรฉrons qu'un batch de quatre images au lieu de huit. Voyez si vous pouvez repรฉrer la diffรฉrence et rรฉflรฉchissez ร  la raison pour laquelle cette mรฉthode est plus "prรฉcise" :<jupyter_code># Variante 2 : dรฉfinir x.requires_grad avant de calculer les prรฉdictions du modรจle guidance_loss_scale = 40 x = torch.randn(4, 3, 256, 256).to(device) for i, t in tqdm(enumerate(scheduler.timesteps)): # Dรฉfinir requires_grad avant la passe avant du modรจle x = x.detach().requires_grad_() model_input = scheduler.scale_model_input(x, t) # prรฉdire (avec grad cette fois) noise_pred = image_pipe.unet(model_input, t)["sample"] # Obtenir la valeur prรฉdite x0 : x0 = scheduler.step(noise_pred, t, x).pred_original_sample # Calculer la perte loss = color_loss(x0) * guidance_loss_scale if i % 10 == 0: print(i, "loss:", loss.item()) # Obtenir le gradient cond_grad = -torch.autograd.grad(loss, x)[0] # Modifier x en fonction de ce gradient x = x.detach() + cond_grad # Le planificateur x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 Image.fromarray(np.array(im * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>Dans la seconde variante, les besoins en mรฉmoire sont plus importants et l'effet est moins prononcรฉ, de sorte que vous pouvez penser qu'elle est infรฉrieure. Cependant, les rรฉsultats sont sans doute plus proches des types d'images sur lesquels le modรจle a รฉtรฉ entraรฎnรฉ, et vous pouvez toujours augmenter l'รฉchelle de guidage pour obtenir un effet plus important. L'approche que vous utiliserez dรฉpendra en fin de compte de ce qui fonctionne le mieux sur le plan expรฉrimental. **Exercice :** choisissez votre couleur prรฉfรฉrรฉe et recherchez ses valeurs dans l'espace RGB.Modifiez la ligne `color_loss()` dans la cellule ci-dessus pour recevoir ces nouvelles valeurs RGB et examinez les rรฉsultats ; correspondent-ils ร  ce que vous attendez ? 
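For the exercise above, one convenient way to get the `(0, 1)` RGB values that `color_loss()` expects is to let PIL resolve a named or hex colour. The helper below is an illustrative addition, not part of the original notebook; `ImageColor` accepts CSS colour names as well as hex strings.

```python
from PIL import ImageColor

def rgb_for_color_loss(color):
    # ImageColor returns integers in the 0-255 range for names ("hotpink")
    # or hex strings ("#BB2266"); color_loss() expects values in (0, 1).
    r, g, b = ImageColor.getcolor(color, "RGB")
    return (r / 255, g / 255, b / 255)

print(rgb_for_color_loss("hotpink"))
# For example: loss = color_loss(x0, target_color=rgb_for_color_loss("#BB2266")) * guidance_loss_scale
```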
Guidage avec CLIPGuider vers une couleur nous donne un peu de contrรดle, mais que se passerait-il si nous pouvions simplement taper un texte dรฉcrivant ce que nous voulons ?[CLIP](https://openai.com/research/clip) est un modรจle crรฉรฉ par OpenAI qui nous permet de comparer des images ร  des lรฉgendes textuelles. C'est extrรชmement puissant, car cela nous permet de quantifier ร  quel point une image correspond ร  un prompt. Et comme le processus est diffรฉrentiable, nous pouvons l'utiliser comme fonction de perte pour guider notre modรจle de diffusion !Nous n'entrerons pas dans les dรฉtails ici. L'approche de base est la suivante :- Enchรขsser le prompt pour obtenir un enchรขssement CLIP ร  512 dimensions- Pour chaque รฉtape du processus du modรจle de diffusion : - Crรฉer plusieurs variantes de l'image dรฉbruitรฉe prรฉdite (le fait d'avoir plusieurs variantes permet d'obtenir un signal de perte plus propre). - Pour chacune d'entre elles, enchรขsser l'image avec CLIP et comparez cet enchรขssement avec celui du prompt (ร  l'aide d'une mesure appelรฉe "distance du grand cercle").- Calculer le gradient de cette perte par rapport ร  l'image bruyante actuelle x et utiliser ce gradient pour modifier x avant de le mettre ร  jour avec le planificateur.Pour une explication plus approfondie de CLIP, consultez cette [leรงon sur le sujet] (https://johnowhitaker.github.io/tglcourse/clip.html) ou ce [rapport sur le projet OpenCLIP] (https://wandb.ai/johnowhitaker/openclip-benchmarking/reports/Exploring-OpenCLIP--VmlldzoyOTIzNzIz) que nous utilisons pour charger le modรจle CLIP. Exรฉcutez la cellule suivante pour charger un modรจle CLIP :<jupyter_code>import open_clip clip_model, _, preprocess = open_clip.create_model_and_transforms( "ViT-B-32", pretrained="openai" ) clip_model.to(device) # Transformations pour redimensionner et augmenter une image + normalisation pour correspondre aux donnรฉes entraรฎnรฉes par CLIP tfms = torchvision.transforms.Compose( [ torchvision.transforms.RandomResizedCrop(224), # CROP alรฉatoire ร  chaque fois torchvision.transforms.RandomAffine( 5 ), # Une augmentation alรฉatoire possible : biaiser l'image torchvision.transforms.RandomHorizontalFlip(), # Vous pouvez ajouter des augmentations supplรฉmentaires si vous le souhaitez torchvision.transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) # Et dรฉfinir une fonction de perte qui prend une image, l'enchรขsse et la compare avec les caractรฉristiques textuelles du prompt def clip_loss(image, text_features): image_features = clip_model.encode_image( tfms(image) ) # Note : applique les transformations ci-dessus input_normed = torch.nn.functional.normalize(image_features.unsqueeze(1), dim=2) embed_normed = torch.nn.functional.normalize(text_features.unsqueeze(0), dim=2) dists = ( input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2) ) # Distance du grand cercle return dists.mean()<jupyter_output>100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 354M/354M [00:02<00:00, 120MiB/s]<jupyter_text>Une fois la fonction de perte dรฉfinie, notre boucle d'รฉchantillonnage guidรฉ ressemble aux exemples prรฉcรฉdents, en remplaรงant `color_loss()` par notre nouvelle fonction de perte basรฉe sur CLIP :<jupyter_code>prompt = "Red Rose (still life), red flower painting" # @param # Explorer en changeant รงa guidance_scale = 8 # @param n_cuts = 4 # @param # Plus d'รฉtapes -> plus de temps pour que 
le guidage ait un effet scheduler.set_timesteps(50) # Nous enchรขssons un prompt avec CLIP comme cible text = open_clip.tokenize([prompt]).to(device) with torch.no_grad(), torch.cuda.amp.autocast(): text_features = clip_model.encode_text(text) x = torch.randn(4, 3, 256, 256).to( device ) # L'utilisation de la RAM est รฉlevรฉe, vous ne voulez peut-รชtre qu'une seule image ร  la fois. for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) # prรฉdire le bruit rรฉsiduel with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] cond_grad = 0 for cut in range(n_cuts): # nรฉcessite un grad sur x x = x.detach().requires_grad_() # Obtenir le x0 prรฉdit x0 = scheduler.step(noise_pred, t, x).pred_original_sample # Calculer la perte loss = clip_loss(x0, text_features) * guidance_scale # Obtenir le gradient (รฉchelle par n_cuts puisque nous voulons la moyenne) cond_grad -= torch.autograd.grad(loss, x)[0] / n_cuts if i % 25 == 0: print("Step:", i, ", Guidance loss:", loss.item()) # Modifier x en fonction de ce gradient alpha_bar = scheduler.alphas_cumprod[i] x = ( x.detach() + cond_grad * alpha_bar.sqrt() ) # Note the additional scaling factor here! # Le planificateur x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x.detach(), nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 Image.fromarray(np.array(im * 255).astype(np.uint8))<jupyter_output><empty_output><jupyter_text>Cela ressemble un peu ร  des roses ! Ce n'est pas parfait, mais si vous jouez avec les paramรจtres, vous pouvez obtenir des images agrรฉables. Si vous examinez le code ci-dessus, vous verrez que nous mettons ร  l'รฉchelle le gradient de conditionnement par un facteur de `alpha_bar.sqrt()`. Il existe des thรฉories sur la "bonne" maniรจre d'รฉchelonner ces gradients, mais en pratique, vous pouvez expรฉrimenter. Pour certains types de guidage, vous voudrez peut-รชtre que la plupart des effets soient concentrรฉs dans les premiรจres รฉtapes, pour d'autres (par exemple, une perte de style axรฉe sur les textures), vous prรฉfรฉrerez peut-รชtre qu'ils n'interviennent que vers la fin du processus de gรฉnรฉration. Quelques programmes possibles sont prรฉsentรฉs ci-dessous :<jupyter_code>plt.plot([1 for a in scheduler.alphas_cumprod], label="no scaling") plt.plot([a for a in scheduler.alphas_cumprod], label="alpha_bar") plt.plot([a.sqrt() for a in scheduler.alphas_cumprod], label="alpha_bar.sqrt()") plt.plot( [(1 - a).sqrt() for a in scheduler.alphas_cumprod], label="(1-alpha_bar).sqrt()" ) plt.legend() plt.title("Possible guidance scaling schedules");<jupyter_output><empty_output><jupyter_text>Expรฉrimentez avec diffรฉrents plannificateurs, รฉchelles de guidage et toute autre astuce ร  laquelle vous pouvez penser (l'รฉcrรชtage des gradients dans une certaine plage est une modification populaire) pour voir jusqu'ร  quel point vous pouvez obtenir ce rรฉsultat ! N'oubliez pas non plus d'essayer d'intervertir d'autres modรจles. Peut-รชtre le modรจle de visages que nous avons chargรฉ au dรฉbut ; pouvez-vous le guider de maniรจre fiable pour produire un visage masculin ? Que se passe-t-il si vous combinez le guidage CLIP avec la perte de couleur que nous avons utilisรฉe plus tรดt ? 
Etc.Si vous consultez [quelques codes pour la diffusion guidรฉe par CLIP en pratique](https://huggingface.co/spaces/EleutherAI/clip-guided-diffusion/blob/main/app.py), vous verrez une approche plus complexe avec une meilleure classe pour choisir des dรฉcoupes alรฉatoires dans les images et de nombreux ajustements supplรฉmentaires de la fonction de perte pour de meilleures performances. Avant l'apparition des modรจles de diffusion conditionnรฉs par le texte, il s'agissait du meilleur systรจme de conversion texte-image qui soit ! La petite version de notre jouet peut encore รชtre amรฉliorรฉe, mais elle capture l'idรฉe principale : grรขce au guidage et aux capacitรฉs รฉtonnantes de CLIP, nous pouvons ajouter le contrรดle du texte ร  un modรจle de diffusion inconditionnel ๐ŸŽจ. Partager une boucle d'รฉchantillonnage personnalisรฉe en tant que dรฉmo GradioVous avez peut-รชtre trouvรฉ une perte amusante pour guider la gรฉnรฉration et vous souhaitez maintenant partager avec le monde entier votre modรจle *finetunรฉ* et cette stratรฉgie d'รฉchantillonnage personnalisรฉe... Entrez dans [Gradio](https://gradio.app/). Gradio est un outil gratuit et open-source qui permet aux utilisateurs de crรฉer et de partager facilement des modรจles interactifs d'apprentissage automatique via une simple interface web. Avec Gradio, les utilisateurs peuvent construire des interfaces personnalisรฉes pour leurs modรจles d'apprentissage automatique, qui peuvent ensuite รชtre partagรฉs avec d'autres par le biais d'une URL unique. Il est รฉgalement intรฉgrรฉ ร  ๐Ÿค— Spaces, ce qui permet d'hรฉberger facilement des dรฉmos et de les partager avec d'autres.Nous placerons notre logique de base dans une fonction qui prend certaines entrรฉes et produit une image en sortie. Cette fonction peut ensuite รชtre enveloppรฉe dans une interface simple qui permet ร  l'utilisateur de spรฉcifier certains paramรจtres (qui sont transmis en tant qu'entrรฉes ร  la fonction principale de gรฉnรฉration). De nombreux [composants](https://gradio.app/docs/components) sont disponibles ; pour cet exemple, nous utiliserons un curseur pour l'รฉchelle d'orientation et un sรฉlecteur de couleurs pour dรฉfinir la couleur cible.<jupyter_code>!pip install -q gradio import gradio as gr from PIL import Image, ImageColor # La fonction qui fait le gros du travail def generate(color, guidance_loss_scale): target_color = ImageColor.getcolor(color, "RGB") # Couleur cible en RGB target_color = [a / 255 for a in target_color] # Rรฉรฉchelonner de (0, 255) ร  (0, 1) x = torch.randn(1, 3, 256, 256).to(device) for i, t in tqdm(enumerate(scheduler.timesteps)): model_input = scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = x.detach().requires_grad_() x0 = scheduler.step(noise_pred, t, x).pred_original_sample loss = color_loss(x0, target_color) * guidance_loss_scale cond_grad = -torch.autograd.grad(loss, x)[0] x = x.detach() + cond_grad x = scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5 im = Image.fromarray(np.array(im * 255).astype(np.uint8)) im.save("test.jpeg") return im # Voir la documentation de gradio pour les types d'entrรฉes et de sorties disponibles. 
inputs = [ gr.ColorPicker(label="color", value="55FFAA"), # Ajoutez ici toutes les entrรฉes dont vous avez besoin gr.Slider(label="guidance_scale", minimum=0, maximum=30, value=3), ] outputs = gr.Image(label="result") # Et l'interface minimale demo = gr.Interface( fn=generate, inputs=inputs, outputs=outputs, examples=[ ["#BB2266", 3], ["#44CCAA", 5], # Vous pouvez fournir des exemples d'entrรฉes pour aider les gens ร  dรฉmarrer ], ) demo.launch(debug=True) # debug=True vous permet de voir les erreurs et les sorties dans Colab<jupyter_output><empty_output>
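Coming back to the earlier question about mixing the two guidance signals, the sketch below is an illustrative addition (not part of the original notebook) that simply adds the colour loss to the CLIP loss; the 0.5 weight is an arbitrary starting point, and it reuses the `clip_loss`, `color_loss` and `text_features` objects defined above.

```python
def combined_guidance_loss(image, text_features, target_color=(0.1, 0.9, 0.5), color_weight=0.5):
    # Both losses are differentiable with respect to the image, so a weighted
    # sum can be dropped into the guided sampling loop in place of clip_loss().
    return clip_loss(image, text_features) + color_weight * color_loss(image, target_color)

# Inside the sampling loop, replace
#   loss = clip_loss(x0, text_features) * guidance_scale
# with
#   loss = combined_guidance_loss(x0, text_features) * guidance_scale
```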
diffusion-models-class/units/fr/unit2/finetuning_and_guidance.ipynb/0
{ "file_path": "diffusion-models-class/units/fr/unit2/finetuning_and_guidance.ipynb", "repo_id": "diffusion-models-class", "token_count": 15878 }
143
<jupyter_start><jupyter_text>Tout assembler (PyTorch) Installez la bibliothรจque ๐Ÿค— *Transformers* pour exรฉcuter ce *notebook*.<jupyter_code>!pip install transformers[sentencepiece] from transformers import AutoTokenizer checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) sequence = "J'ai attendu un cours dโ€™HuggingFace toute ma vie." model_inputs = tokenizer(sequence) sequence = "J'ai attendu un cours dโ€™HuggingFace toute ma vie." model_inputs = tokenizer(sequence) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] model_inputs = tokenizer(sequences) # Remplit les sรฉquences jusqu'ร  la longueur maximale de la sรฉquence model_inputs = tokenizer(sequences, padding="longest") # Remplit les sรฉquences jusqu'ร  la longueur maximale du modรจle (512 pour BERT ou DistilBERT) model_inputs = tokenizer(sequences, padding="max_length") # Remplit les sรฉquences jusqu'ร  la longueur maximale spรฉcifiรฉe model_inputs = tokenizer(sequences, padding="max_length", max_length=8) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] # Tronque les sรฉquences qui sont plus longues que la longueur maximale du modรจle # (512 pour BERT ou DistilBERT) model_inputs = tokenizer(sequences, truncation=True) # Tronque les sรฉquences qui sont plus longues que la longueur maximale spรฉcifiรฉe model_inputs = tokenizer(sequences, max_length=8, truncation=True) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] # Retourne des tenseurs PyTorch model_inputs = tokenizer(sequences, padding=True, return_tensors="pt") # Retourne des tenseurs TensorFlow model_inputs = tokenizer(sequences, padding=True, return_tensors="tf") # Retourne des tableaux NumPy model_inputs = tokenizer(sequences, padding=True, return_tensors="np") sequence = "J'ai attendu un cours de HuggingFace toute ma vie." model_inputs = tokenizer(sequence) print(model_inputs["input_ids"]) tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) print(ids) print(tokenizer.decode(model_inputs["input_ids"])) print(tokenizer.decode(ids)) import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint, from_tf=True) sequences = [ "J'ai attendu un cours de HuggingFace toute ma vie.", "Moi aussi !", ] tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt") output = model(**tokens)<jupyter_output><empty_output>
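As a small illustrative extension of the last cell (not part of the original notebook), the raw logits can be turned into probabilities and readable labels; this assumes the checkpoint exposes an `id2label` mapping, which is the case for most sequence-classification models on the Hub.

```python
import torch

# Convert the logits returned by the model into probabilities, then map each
# prediction to the label name stored in the model configuration.
probabilities = torch.nn.functional.softmax(output.logits, dim=-1)
for sequence, probs in zip(sequences, probabilities):
    label_id = int(probs.argmax())
    print(f"{sequence} -> {model.config.id2label[label_id]} ({probs[label_id].item():.3f})")
```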
notebooks/course/fr/chapter2/section6_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter2/section6_pt.ipynb", "repo_id": "notebooks", "token_count": 974 }
144
<jupyter_start><jupyter_text>Crรฉation de votre propre jeu de donnรฉes Installez les bibliothรจques ๐Ÿค— Transformers et ๐Ÿค— Datasets pour exรฉcuter ce *notebook*.<jupyter_code>!pip install datasets evaluate transformers[sentencepiece] !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez รฉgalement รชtre connectรฉ au *Hub* d'Hugging Face. Exรฉcutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() !pip install requests import requests url = "https://api.github.com/repos/huggingface/datasets/issues?page=1&per_page=1" response = requests.get(url) response.status_code response.json() GITHUB_TOKEN = xxx # Copiez votre jeton GitHub ici headers = {"Authorization": f"token {GITHUB_TOKEN}"} import time import math from pathlib import Path import pandas as pd from tqdm.notebook import tqdm def fetch_issues( owner="huggingface", repo="datasets", num_issues=10_000, rate_limit=5_000, issues_path=Path("."), ): if not issues_path.is_dir(): issues_path.mkdir(exist_ok=True) batch = [] all_issues = [] per_page = 100 # Nombre d'issues ร  renvoyer par page num_pages = math.ceil(num_issues / per_page) base_url = "https://api.github.com/repos" for page in tqdm(range(num_pages)): # Requรชte avec state=all pour obtenir les questions ouvertes et fermรฉes query = f"issues?page={page}&per_page={per_page}&state=all" issues = requests.get(f"{base_url}/{owner}/{repo}/{query}", headers=headers) batch.extend(issues.json()) if len(batch) > rate_limit and len(all_issues) < num_issues: all_issues.extend(batch) batch = [] # Vider le batch pour la prochaine pรฉriode de temps print(f"Reached GitHub rate limit. Sleeping for one hour ...") time.sleep(60 * 60 + 1) all_issues.extend(batch) df = pd.DataFrame.from_records(all_issues) df.to_json(f"{issues_path}/{repo}-issues.jsonl", orient="records", lines=True) print( f"Downloaded all the issues for {repo}! Dataset stored at {issues_path}/{repo}-issues.jsonl" ) # En fonction de votre connexion Internet, l'exรฉcution peut prendre plusieurs minutes... fetch_issues() issues_dataset = load_dataset("json", data_files="datasets-issues.jsonl", split="train") issues_dataset sample = issues_dataset.shuffle(seed=666).select(range(3)) # Afficher l'URL et les entrรฉes de la demande de tirage for url, pr in zip(sample["html_url"], sample["pull_request"]): print(f">> URL: {url}") print(f">> Pull request: {pr}\n") issues_dataset = issues_dataset.map( lambda x: {"is_pull_request": False if x["pull_request"] is None else True} ) issue_number = 2792 url = f"https://api.github.com/repos/huggingface/datasets/issues/{issue_number}/comments" response = requests.get(url, headers=headers) response.json() def get_comments(issue_number): url = f"https://api.github.com/repos/huggingface/datasets/issues/{issue_number}/comments" response = requests.get(url, headers=headers) return [r["body"] for r in response.json()] # Tester notre fonction fonctionne comme prรฉvu get_comments(2792) # Selon votre connexion internet, cela peut prendre quelques minutes... 
issues_with_comments_dataset = issues_dataset.map( lambda x: {"comments": get_comments(x["number"])} ) issues_with_comments_dataset.to_json("issues-datasets-with-comments.jsonl") from huggingface_hub import list_datasets all_datasets = list_datasets() print(f"Number of datasets on Hub: {len(all_datasets)}") print(all_datasets[0]) from huggingface_hub import notebook_login notebook_login() from huggingface_hub import create_repo repo_url = create_repo(name="github-issues", repo_type="dataset") repo_url from huggingface_hub import Repository repo = Repository(local_dir="github-issues", clone_from=repo_url) !cp datasets-issues-with-comments.jsonl github-issues/ repo.lfs_track("*.jsonl") repo.push_to_hub() remote_dataset = load_dataset("lewtun/github-issues", split="train") remote_dataset<jupyter_output><empty_output>
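A natural next step, shown here as an illustrative sketch rather than part of the original notebook, is to narrow the dataset down with `Dataset.filter`, for example keeping only genuine issues that received at least one comment; this assumes the pushed dataset kept the `is_pull_request` and `comments` columns created above.

```python
# Keep only real issues (not pull requests) that have at least one comment.
filtered_dataset = remote_dataset.filter(
    lambda x: (not x["is_pull_request"]) and len(x["comments"]) > 0
)
print(filtered_dataset)
```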
notebooks/course/fr/chapter5/section5.ipynb/0
{ "file_path": "notebooks/course/fr/chapter5/section5.ipynb", "repo_id": "notebooks", "token_count": 1679 }
145
<jupyter_start><jupyter_text>Finetuner un modรจle de language masquรฉ (PyTorch) Installez les bibliothรจques ๐Ÿค— *Datasets*, ๐Ÿค— *Transformers* et ๐Ÿค— *Accelerate* pour exรฉcuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install accelerate # Pour exรฉcuter l'entraรฎnement sur TPU, vous devez dรฉcommenter la ligne suivante : # !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez รฉgalement รชtre connectรฉ au Hub d'Hugging Face. Exรฉcutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from transformers import AutoModelForMaskedLM model_checkpoint = "camembert-base" model = AutoModelForMaskedLM.from_pretrained(model_checkpoint) text = "C'est une grande <mask>." from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) import torch inputs = tokenizer(text, return_tensors="pt") token_logits = model(**inputs).logits # Trouver l'emplacement du <mask> et extraire ses logits mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] mask_token_logits = token_logits[0, mask_token_index, :] # Choisir les <mask> candidats avec les logits les plus รฉlevรฉs top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist() for token in top_5_tokens: print(f"'>>> {text.replace(tokenizer.mask_token, tokenizer.decode([token]))}'") from datasets import load_dataset imdb_dataset = load_dataset("allocine") imdb_dataset sample = imdb_dataset["train"].shuffle(seed=42).select(range(3)) for row in sample: print(f"\n'>>> Review: {row['review']}'") print(f"'>>> Label: {row['label']}'") def tokenize_function(examples): result = tokenizer(examples["review"]) if tokenizer.is_fast: result["word_ids"] = [result.word_ids(i) for i in range(len(result["input_ids"]))] return result # Utilisez batched=True pour activer le multithreading rapide ! 
tokenized_datasets = imdb_dataset.map( tokenize_function, batched=True, remove_columns=["review", "label"] ) tokenized_datasets tokenizer.model_max_length chunk_size = 128 # Le dรฉcoupage produit une liste de listes pour chaque caractรฉristique tokenized_samples = tokenized_datasets["train"][:3] for idx, sample in enumerate(tokenized_samples["input_ids"]): print(f"'>>> Review {idx} length: {len(sample)}'") concatenated_examples = { k: sum(tokenized_samples[k], []) for k in tokenized_samples.keys() } total_length = len(concatenated_examples["input_ids"]) print(f"'>>> Concatenated reviews length: {total_length}'") chunks = { k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)] for k, t in concatenated_examples.items() } for chunk in chunks["input_ids"]: print(f"'>>> Chunk length: {len(chunk)}'") def group_texts(examples): # Concatรฉnation de tous les textes concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} # Calculer la longueur des textes concatรฉnรฉs total_length = len(concatenated_examples[list(examples.keys())[0]]) # Nous laissons tomber le dernier morceau s'il est plus petit que chunk_size total_length = (total_length // chunk_size) * chunk_size # Fractionnement par morceaux de max_len result = { k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)] for k, t in concatenated_examples.items() } # Crรฉer une nouvelle colonne d'รฉtiquettes result["labels"] = result["input_ids"].copy() return result lm_datasets = tokenized_datasets.map(group_texts, batched=True) lm_datasets tokenizer.decode(lm_datasets["train"][1]["input_ids"]) from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15) samples = [lm_datasets["train"][i] for i in range(2)] for sample in samples: _ = sample.pop("word_ids") for chunk in data_collator(samples)["input_ids"]: print(f"\n'>>> {tokenizer.decode(chunk)}'") import collections import numpy as np from transformers import default_data_collator wwm_probability = 0.2 def whole_word_masking_data_collator(features): for feature in features: word_ids = feature.pop("word_ids") # Crรฉation d'une correspondance entre les mots et les indices des tokens correspondants mapping = collections.defaultdict(list) current_word_index = -1 current_word = None for idx, word_id in enumerate(word_ids): if word_id is not None: if word_id != current_word: current_word = word_id current_word_index += 1 mapping[current_word_index].append(idx) # Masquer des mots de faรงon alรฉatoire mask = np.random.binomial(1, wwm_probability, (len(mapping),)) input_ids = feature["input_ids"] labels = feature["labels"] new_labels = [-100] * len(labels) for word_id in np.where(mask)[0]: word_id = word_id.item() for idx in mapping[word_id]: new_labels[idx] = labels[idx] input_ids[idx] = tokenizer.mask_token_id feature["labels"] = new_labels return default_data_collator(features) samples = [lm_datasets["train"][i] for i in range(2)] batch = whole_word_masking_data_collator(samples) for chunk in batch["input_ids"]: print(f"\n'>>> {tokenizer.decode(chunk)}'") train_size = 10_000 test_size = int(0.1 * train_size) downsampled_dataset = lm_datasets["train"].train_test_split( train_size=train_size, test_size=test_size, seed=42 ) downsampled_dataset from transformers import TrainingArguments batch_size = 64 # Montrer la perte d'entraรฎnement ร  chaque รฉpoque logging_steps = len(downsampled_dataset["train"]) // batch_size model_name = model_checkpoint.split("/")[-1] 
training_args = TrainingArguments( output_dir=f"{model_name}-finetuned-allocine", overwrite_output_dir=True, evaluation_strategy="epoch", learning_rate=2e-5, weight_decay=0.01, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, push_to_hub=True, fp16=True, logging_steps=logging_steps, ) from transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=downsampled_dataset["train"], eval_dataset=downsampled_dataset["test"], data_collator=data_collator, tokenizer=tokenizer, ) import math eval_results = trainer.evaluate() print(f">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}") trainer.train() eval_results = trainer.evaluate() print(f">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}") trainer.push_to_hub() def insert_random_mask(batch): features = [dict(zip(batch, t)) for t in zip(*batch.values())] masked_inputs = data_collator(features) # Crรฉer une nouvelle colonne "masquรฉe" pour chaque colonne du jeu de donnรฉes return {"masked_" + k: v.numpy() for k, v in masked_inputs.items()} downsampled_dataset = downsampled_dataset.remove_columns(["word_ids"]) eval_dataset = downsampled_dataset["test"].map( insert_random_mask, batched=True, remove_columns=downsampled_dataset["test"].column_names, ) eval_dataset = eval_dataset.rename_columns( { "masked_input_ids": "input_ids", "masked_attention_mask": "attention_mask", "masked_labels": "labels", } ) from torch.utils.data import DataLoader from transformers import default_data_collator batch_size = 64 train_dataloader = DataLoader( downsampled_dataset["train"], shuffle=True, batch_size=batch_size, collate_fn=data_collator, ) eval_dataloader = DataLoader( eval_dataset, batch_size=batch_size, collate_fn=default_data_collator ) from torch.optim import AdamW optimizer = AdamW(model.parameters(), lr=5e-5) from accelerate import Accelerator accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) from transformers import get_scheduler num_train_epochs = 3 num_update_steps_per_epoch = len(train_dataloader) num_training_steps = num_train_epochs * num_update_steps_per_epoch lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps, ) from huggingface_hub import get_full_repo_name model_name = "camembert-base-finetuned-allocine-accelerate" repo_name = get_full_repo_name(model_name) repo_name from huggingface_hub import Repository output_dir = model_name repo = Repository(output_dir, clone_from=repo_name) from tqdm.auto import tqdm import torch import math progress_bar = tqdm(range(num_training_steps)) for epoch in range(num_train_epochs): # Entraรฎnement model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) # Evaluation model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather(loss.repeat(batch_size))) losses = torch.cat(losses) losses = losses[: len(eval_dataset)] try: perplexity = math.exp(torch.mean(losses)) except OverflowError: perplexity = float("inf") print(f">>> Epoch {epoch}: Perplexity: {perplexity}") # Sauvegarder et tรฉlรฉcharger accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(output_dir, 
save_function=accelerator.save) if accelerator.is_main_process: tokenizer.save_pretrained(output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False ) from transformers import pipeline mask_filler = pipeline( "fill-mask", model="huggingface-course/camembert-base-finetuned-allocine", tokenizer="huggingface-course/camembert-base-finetuned-allocine", ) preds = mask_filler(text) for pred in preds: print(f">>> {pred['sequence']}")<jupyter_output><empty_output>
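As a final usage illustration (an addition, not part of the original notebook), the `fill-mask` pipeline also accepts a `top_k` argument, and each prediction carries a score and the inserted token:

```python
# Ask for the three most likely completions and inspect their scores.
preds = mask_filler("C'est une grande <mask>.", top_k=3)
for pred in preds:
    print(f"{pred['token_str']} ({pred['score']:.3f}) -> {pred['sequence']}")
```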
notebooks/course/fr/chapter7/section3_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section3_pt.ipynb", "repo_id": "notebooks", "token_count": 4290 }
146
<jupyter_start><jupyter_text>Construire votre première démo Installez les bibliothèques 🤗 Transformers et 🤗 Gradio pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
!pip install gradio

import gradio as gr

def greet(name):
    return "Bonjour " + name

demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()

import gradio as gr

def greet(name):
    return "Bonjour " + name

# Nous instancions la classe Textbox
textbox = gr.Textbox(label="Tapez votre nom ici :", placeholder="Marie Martin", lines=2)

gr.Interface(fn=greet, inputs=textbox, outputs="text").launch()

from transformers import pipeline

model = pipeline("text-generation", model="asi/gpt-fr-cased-small")

def predict(prompt):
    completion = model(prompt)[0]["generated_text"]
    return completion

import gradio as gr

gr.Interface(fn=predict, inputs="text", outputs="text").launch()<jupyter_output><empty_output>
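As an illustrative variation (not part of the original notebook), `gr.Interface` also accepts `title`, `description` and `examples` arguments, which make the demo easier to understand and try out; the texts and example prompts below are arbitrary.

```python
import gradio as gr

demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="Génération de texte en français",
    description="Saisissez le début d'une phrase et le modèle asi/gpt-fr-cased-small la complète.",
    examples=[["Il était une fois"], ["Le Machine Learning est"]],
)
demo.launch()
```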
notebooks/course/fr/chapter9/section2.ipynb/0
{ "file_path": "notebooks/course/fr/chapter9/section2.ipynb", "repo_id": "notebooks", "token_count": 326 }
147
<jupyter_start><jupyter_text>In-painting pipeline for Stable Diffusion using ๐Ÿงจ Diffusers This notebook shows how to do text-guided in-painting with Stable Diffusion model using ๐Ÿค— Hugging Face [๐Ÿงจ Diffusers library](https://github.com/huggingface/diffusers). For a general introduction to the Stable Diffusion model please refer to this [colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb).<jupyter_code>!pip install -qq -U diffusers==0.11.1 transformers ftfy gradio accelerate<jupyter_output><empty_output><jupyter_text>To use private and gated models on ๐Ÿค— Hugging Face Hub, login is required. If you are only using a public checkpoint (such as `runwayml/stable-diffusion-inpainting` in this notebook), you can skip this step.<jupyter_code>from huggingface_hub import notebook_login notebook_login() import inspect from typing import List, Optional, Union import numpy as np import torch import PIL import gradio as gr from diffusers import StableDiffusionInpaintPipeline device = "cuda" model_path = "runwayml/stable-diffusion-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained( model_path, torch_dtype=torch.float16, ).to(device) import requests from io import BytesIO def image_grid(imgs, rows, cols): assert len(imgs) == rows*cols w, h = imgs[0].size grid = PIL.Image.new('RGB', size=(cols*w, rows*h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h)) return grid def download_image(url): response = requests.get(url) return PIL.Image.open(BytesIO(response.content)).convert("RGB") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" image = download_image(img_url).resize((512, 512)) image mask_image = download_image(mask_url).resize((512, 512)) mask_image prompt = "a mecha robot sitting on a bench" guidance_scale=7.5 num_samples = 3 generator = torch.Generator(device="cuda").manual_seed(0) # change the seed to get different results images = pipe( prompt=prompt, image=image, mask_image=mask_image, guidance_scale=guidance_scale, generator=generator, num_images_per_prompt=num_samples, ).images # insert initial image in the list so we can compare side by side images.insert(0, image) image_grid(images, 1, num_samples + 1)<jupyter_output><empty_output><jupyter_text>Gradio Demo<jupyter_code>def predict(dict, prompt): image = dict['image'].convert("RGB").resize((512, 512)) mask_image = dict['mask'].convert("RGB").resize((512, 512)) images = pipe(prompt=prompt, image=image, mask_image=mask_image).images return(images[0]) gr.Interface( predict, title = 'Stable Diffusion In-Painting', inputs=[ gr.Image(source = 'upload', tool = 'sketch', type = 'pil'), gr.Textbox(label = 'prompt') ], outputs = [ gr.Image() ] ).launch(debug=True)<jupyter_output>Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch(). Running on public URL: https://e52f060882d60b09.gradio.app This share link expires in 72 hours. For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces
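As an optional extension (an illustrative addition, not part of the original notebook), the same pipeline call also accepts a `negative_prompt` and a custom number of inference steps, and changing the generator seed produces different variations:

```python
prompt = "a mecha robot sitting on a bench"
negative_prompt = "blurry, low quality, deformed"  # things the model should steer away from

generator = torch.Generator(device="cuda").manual_seed(42)  # new seed -> new variations
images = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=generator,
    num_images_per_prompt=3,
).images

image_grid(images, 1, 3)
```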
notebooks/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb/0
{ "file_path": "notebooks/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb", "repo_id": "notebooks", "token_count": 1254 }
148
# IDEFICS Demos/examples

## Inference

- [Normal inference](inference.py) (needs ~20GB GPU memory)
- [4bit quantized inference](inference_4bit.py) (needs ~7GB GPU memory)

## Finetuning

The following demos use the image captioning task:

- [PEFT (LoRA) finetuning (notebook)](finetune_image_captioning_peft.ipynb) (fits on Google Colab)
- [Normal finetuning](finetune_image_captioning.py) (needs ~40GB GPU memory)
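For orientation, loading IDEFICS in 4-bit roughly follows the sketch below; this is an illustrative snippet rather than a copy of the scripts above, and it assumes a recent `transformers` release with IDEFICS support plus `accelerate` and `bitsandbytes` installed.

```python
import torch
from transformers import AutoProcessor, BitsAndBytesConfig, IdeficsForVisionText2Text

checkpoint = "HuggingFaceM4/idefics-9b"

# Quantize the weights to 4 bits at load time so the model fits in roughly 7GB of GPU memory.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = IdeficsForVisionText2Text.from_pretrained(
    checkpoint,
    quantization_config=quantization_config,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(checkpoint)
```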
notebooks/examples/idefics/README.md/0
{ "file_path": "notebooks/examples/idefics/README.md", "repo_id": "notebooks", "token_count": 148 }
149
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install ๐Ÿค— Transformers as well as some other libraries. Uncomment the following cell and run it.<jupyter_code># Install !pip install -q biopython transformers datasets huggingface_hub accelerate<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to login to the huggingface hub<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code>!apt install git-lfs<jupyter_output>Reading package lists... Done Building dependency tree Reading state information... Done git-lfs is already the newest version (2.9.2-1). 0 upgraded, 0 newly installed, 0 to remove and 16 not upgraded.<jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("nucleotide_transformer_dna_sequence_modeling_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>**Fine-Tuning the Nucleotide-transformer** The **Nucleotide Transformer** paper [Dalla-torre et al, 2023](https://www.biorxiv.org/content/10.1101/2023.01.11.523679v2) introduces 4 genomics foundational models developed by **InstaDeep**. These transformers, of various sizes and trained on different datasets, allow powerful representations of DNA sequences that allow to tackle a very diverse set of problems such as chromatin accessibility, deleteriousness prediction, promoter and enhancer prediction etc... These representations can be extracted from the transformer and used as proxies of the DNA sequences (this is called probing) or the transformer can be trained further on a specific task (this is called finetuning). This notebook allows you to fine-tune these models.The model we are going to use is the [500M Human Ref model](https://huggingface.co/InstaDeepAI/nucleotide-transformer-500m-1000g), which is a 500M parameters transformer pre-trained on the human reference genome, per the training methodology presented in the Nucleotide Transformer Paper. It is one of the 4 models introduced, all available on the [Instadeep HuggingFace page](https://huggingface.co/InstaDeepAI):```| Model name | Num layers | Num parameters | Training dataset ||---------------------|------------|----------------|------------------------|| `500M Human Ref` | 24 | 500M | Human reference genome || `500M 1000G` | 24 | 500M | 1000G genomes || `2.5B 1000G` | 32 | 2.5B | 1000G genomes || `2.5B Multispecies` | 32 | 2.5B | Multi-species dataset |```Note that using the larger models will require more GPU memory and produce longer finetuning timesIn the following, we showcase the nucleotide transformer ability to classify genomic sequences as two of the most basic genomic motifs: **promoters** and **enhancers types**. 
Both of them are classification task, but the enhancers types task is much more challenging with its 3 classes.These two tasks are still very basic, but the nucleotide transformers have been shown to beat/match state of the art models on much more complex tasks such as [DeepSEA](https://www.nature.com/articles/nmeth.3547), which, given a DNA sequence, predicts 919 chromatin profiles from a diverse set of human cells and tissues from a single sequence or [DeepSTARR](https://www.nature.com/articles/s41588-022-01048-5), which predicts an enhancer's activity. **Importing required packages** **Import and install**<jupyter_code># Imports from transformers import AutoTokenizer, TrainingArguments, Trainer, AutoModelForSequenceClassification import torch from sklearn.metrics import matthews_corrcoef, f1_score from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np # Define the working device device = torch.device("cuda")<jupyter_output><empty_output><jupyter_text>**Prepare and create the model for fine-tuning** The nucleotide transformer will be fine-tuned on two **classification tasks**: **promoter** and **enhancer types** classification.The `AutoModelForSequenceClassification` module automatically loads the model and adds a simple classification head on top of the final embeddings. **First task : Promoter prediction** Promoter prediction is a **sequence classification** problem, in which the DNA sequence is predicted to be either a promoter or not.A promoter is a region of DNA where transcription of a gene is initiated. Promoters are a vital component of expression vectors because they control the binding of RNA polymerase to DNA. RNA polymerase transcribes DNA to mRNA which is ultimately translated into a functional protein This task was introduced in [DeePromoter](https://www.frontiersin.org/articles/10.3389/fgene.2019.00286/full), where a set of TATA and non-TATA promoters was gathered. A negative sequence was generated from each promoter, by randomly sampling subsets of the sequence, to guarantee that some obvious motifs were present both in the positive and negative dataset.<jupyter_code>num_labels_promoter = 2 # Load the model model = AutoModelForSequenceClassification.from_pretrained("InstaDeepAI/nucleotide-transformer-500m-human-ref", num_labels=num_labels_promoter) model = model.to(device)<jupyter_output><empty_output><jupyter_text>**Dataset loading and preparation**<jupyter_code>from datasets import load_dataset, Dataset # Load the promoter dataset from the InstaDeep Hugging Face ressources dataset_name = "promoter_all" train_dataset_promoter = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="train", streaming= False, ) test_dataset_promoter = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="test", streaming= False, ) # Get training data train_sequences_promoter = train_dataset_promoter['sequence'] train_labels_promoter = train_dataset_promoter['label'] # Split the dataset into a training and a validation dataset train_sequences_promoter, validation_sequences_promoter, train_labels_promoter, validation_labels_promoter = train_test_split(train_sequences_promoter, train_labels_promoter, test_size=0.05, random_state=42) # Get test data test_sequences_promoter = test_dataset_promoter['sequence'] test_labels_promoter = test_dataset_promoter['label']<jupyter_output><empty_output><jupyter_text>Let us have a look at the data. 
If we extract the last sequence of the dataset, we see that it is indeed a promoter, as its label is 1. Furthermore, we can also see that it is a TATA promoter, as the TATA motif is present at the 221th nucleotide of the sequence!<jupyter_code>idx_sequence = -1 sequence, label = train_sequences_promoter[idx_sequence], train_labels_promoter[idx_sequence] print(f"The DNA sequence is {sequence}.") print(f"Its associated label is label {label}.") idx_TATA = sequence.find("TATA") print(f"This promoter is a TATA promoter, as the TATA motif is present at the {idx_TATA}th nucleotide.")<jupyter_output>The DNA sequence is CACACCAGACAAAATTTGGTTAATTTGCGCCCAATATTCATTACTTTGACCTAACCTTTGTTCTGAAGGCCGTGTACAAGGACAAGGCCCTGAGATTATTGCAACAGTAACTTGAAAAACTTTCAGAAGTCTATTCTGTAGGATTAAAGGAATGCTGAGACTATTCAAGTTTGAAGTCCTGGGGGTGGGGAAAAATAAAAAACCTGTGCTAGAAAGCTTAGTATAGCATGTAACTTTAGAGTCCTGTGGAGTCCTGAGTCTCCCACAGACCAGAACAGTCATTTAAAAGTTTTCAGGAAA. Its associated label is label 1. This promoter is a TATA promoter, as the TATA motif is present at the 221th nucleotide.<jupyter_text>**Tokenizing the datasets** All inputs to neural nets must be numerical. The process of converting strings into numerical indices suitable for a neural net is called **tokenization**.<jupyter_code># Load the tokenizer tokenizer = AutoTokenizer.from_pretrained("InstaDeepAI/nucleotide-transformer-500m-human-ref") # Promoter dataset ds_train_promoter = Dataset.from_dict({"data": train_sequences_promoter,'labels':train_labels_promoter}) ds_validation_promoter = Dataset.from_dict({"data": validation_sequences_promoter,'labels':validation_labels_promoter}) ds_test_promoter = Dataset.from_dict({"data": test_sequences_promoter,'labels':test_labels_promoter}) def tokenize_function(examples): outputs = tokenizer(examples["data"]) return outputs # Creating tokenized promoter dataset tokenized_datasets_train_promoter = ds_train_promoter.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_validation_promoter = ds_validation_promoter.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_test_promoter = ds_test_promoter.map( tokenize_function, batched=True, remove_columns=["data"], )<jupyter_output><empty_output><jupyter_text>**Fine-tuning and evaluation** The hyper-parameters introduced here are different from the ones used in the paper since we are training the whole model. Further hyper-parameters search will surely improve the performance on the task!.We initialize our `TrainingArguments`. These control the various training hyperparameters, and will be passed to our `Trainer`.<jupyter_code>batch_size = 8 model_name='nucleotide-transformer' args_promoter = TrainingArguments( f"{model_name}-finetuned-NucleotideTransformer", remove_unused_columns=False, evaluation_strategy="steps", save_strategy="steps", learning_rate=1e-5, per_device_train_batch_size=batch_size, gradient_accumulation_steps= 1, per_device_eval_batch_size= 64, num_train_epochs= 2, logging_steps= 100, load_best_model_at_end=True, # Keep the best model according to the evaluation metric_for_best_model="f1_score", label_names=["labels"], dataloader_drop_last=True, max_steps= 1000 )<jupyter_output><empty_output><jupyter_text>Next, we define the metric we will use to evaluate our models and write a `compute_metrics` function. 
We can load this from the `scikit-learn` library.<jupyter_code># Define the metric for the evaluation using the f1 score def compute_metrics_f1_score(eval_pred): """Computes F1 score for binary classification""" predictions = np.argmax(eval_pred.predictions, axis=-1) references = eval_pred.label_ids r={'f1_score': f1_score(references, predictions)} return r trainer = Trainer( model.to(device), args_promoter, train_dataset= tokenized_datasets_train_promoter, eval_dataset= tokenized_datasets_validation_promoter, tokenizer=tokenizer, compute_metrics=compute_metrics_f1_score, )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>train_results = trainer.train()<jupyter_output>/usr/local/lib/python3.10/dist-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning warnings.warn(<jupyter_text>Note that the finetuning is done with a small batch size (8). The training time can be reduced by increasing the batch size, as it leverages parallelism in the GPU. **Validation F1 score**<jupyter_code>curve_evaluation_f1_score =[[a['step'],a['eval_f1_score']] for a in trainer.state.log_history if 'eval_f1_score' in a.keys()] eval_f1_score = [c[1] for c in curve_evaluation_f1_score] steps = [c[0] for c in curve_evaluation_f1_score] plt.plot(steps, eval_f1_score, 'b', label='Validation F1 score') plt.title('Validation F1 score for promoter prediction') plt.xlabel('Number of training steps performed') plt.ylabel('Validation F1 score') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>**F1 score on the test dataset**<jupyter_code># Compute the F1 score on the test dataset : print(f"F1 score on the test dataset: {trainer.predict(tokenized_datasets_test_promoter).metrics['test_f1_score']}")<jupyter_output><empty_output><jupyter_text>For the promoter prediction task, we obtain a perforance that is already close to the one displayed in the [**article**](https://www.biorxiv.org/content/10.1101/2023.01.11.523679v1.full.pdf) by training on only 1000 steps. A F1 score of **0.938** is obtained after just **1000 training steps**. To get closer to the **0.954** score obtained in the nucleotide transformer paper after 10,000 training steps, we surely need to train for longer! **Second task : Enhancer type prediction** In this section, we fine-tune the nucleotide transformer model on **enhancer type prediction**, which consists in classifying a DNA sequence as **strong**, **weak** or **non enhancer**.In genetics, an enhancer is a short (50โ€“1500 bp) region of DNA that can be bound by proteins (activators) to increase the likelihood that transcription of a particular gene will occur.[A deep learning framework for enhancer prediction using word embedding and sequence generation](https://www.sciencedirect.com/science/article/abs/pii/S0301462222000643) introduced the dataset used here by augmenting an original set of enhancers with 6000 synthetic enhancers and 6000 syntheticnon-enhancers produced through a generative model. 
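Before building the model it can help to see how the three classes are distributed, which also motivates the choice of metric later on. The quick check below is an illustrative addition (not part of the original notebook) that loads the `enhancers_types` training split on its own and counts the labels.

```python
from collections import Counter

from datasets import load_dataset

# Count how often each of the three enhancer labels occurs in the training split.
enhancers_train = load_dataset(
    "InstaDeepAI/nucleotide_transformer_downstream_tasks",
    "enhancers_types",
    split="train",
)
print(Counter(enhancers_train["label"]))
```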
Model<jupyter_code>num_labels_enhancers_types = 3 # Load the model model = AutoModelForSequenceClassification.from_pretrained("InstaDeepAI/nucleotide-transformer-500m-human-ref", num_labels=num_labels_enhancers_types) model = model.to(device)<jupyter_output>Some weights of the model checkpoint at InstaDeepAI/nucleotide-transformer-500m-human-ref were not used when initializing EsmForSequenceClassification: ['lm_head.layer_norm.weight', 'lm_head.layer_norm.bias', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.decoder.weight', 'lm_head.bias'] - This IS expected if you are initializing EsmForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing EsmForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of EsmForSequenceClassification were not initialized from the model checkpoint at InstaDeepAI/nucleotide-transformer-500m-human-ref and are newly initialized: ['classifier.out_proj.bias', 'classifier[...]<jupyter_text>**Dataset loading and preparation**<jupyter_code>from datasets import load_dataset, Dataset # Load the enhancers dataset from the InstaDeep Hugging Face ressources dataset_name = "enhancers_types" train_dataset_enhancers = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="train", streaming= False, ) test_dataset_enhancers = load_dataset( "InstaDeepAI/nucleotide_transformer_downstream_tasks", dataset_name, split="test", streaming= False, ) # Get training data train_sequences_enhancers = train_dataset_enhancers['sequence'] train_labels_enhancers = train_dataset_enhancers['label'] # Split the dataset into a training and a validation dataset train_sequences_enhancers, validation_sequences_enhancers, train_labels_enhancers, validation_labels_enhancers = train_test_split(train_sequences_enhancers, train_labels_enhancers, test_size=0.10, random_state=42) # Get test data test_sequences_enhancers = test_dataset_enhancers['sequence'] test_labels_enhancers = test_dataset_enhancers['label']<jupyter_output><empty_output><jupyter_text>**Tokenizing the datasets**<jupyter_code># Enhancer dataset ds_train_enhancers = Dataset.from_dict({"data": train_sequences_enhancers,'labels':train_labels_enhancers}) ds_validation_enhancers = Dataset.from_dict({"data": validation_sequences_enhancers,'labels':validation_labels_enhancers}) ds_test_enhancers = Dataset.from_dict({"data": test_sequences_enhancers,'labels':test_labels_enhancers}) # Creating tokenized enhancer dataset tokenized_datasets_train_enhancers = ds_train_enhancers.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_validation_enhancers = ds_validation_enhancers.map( tokenize_function, batched=True, remove_columns=["data"], ) tokenized_datasets_test_enhancers = ds_test_enhancers.map( tokenize_function, batched=True, remove_columns=["data"], )<jupyter_output><empty_output><jupyter_text>**Fine-tuning and evaluation** As with the promoters task, the hyper-parameters introduced here are different from the ones used in the paper since we are training the whole model. Further hyper-parameters search will surely improve the performance on the task!.We initialize our `TrainingArguments`. 
These control the various training hyperparameters, and will be passed to our `Trainer`.<jupyter_code>batch_size = 8 model_name='nucleotide-transformer' args_enhancers = TrainingArguments( f"{model_name}-finetuned-NucleotideTransformer", remove_unused_columns=False, evaluation_strategy="steps", save_strategy="steps", learning_rate=1e-5, per_device_train_batch_size=batch_size, gradient_accumulation_steps= 1, per_device_eval_batch_size= 64, num_train_epochs= 2, logging_steps= 100, load_best_model_at_end=True, # Keep the best model according to the evaluation metric_for_best_model="mcc_score", # The mcc_score on the evaluation dataset used to select the best model label_names=["labels"], dataloader_drop_last=True, max_steps= 1000 )<jupyter_output><empty_output><jupyter_text>Here, the metric used to evaluate the model is the Matthews Correlation Coefficient, which is more relevant than the accuracy when the classes in the dataset are unbalanced. We can load a predefined function from the `scikit-learn` library.<jupyter_code># Define the metric for the evaluation def compute_metrics_mcc(eval_pred): """Computes Matthews correlation coefficient (MCC score) for binary classification""" predictions = np.argmax(eval_pred.predictions, axis=-1) references = eval_pred.label_ids r={'mcc_score': matthews_corrcoef(references, predictions)} return r trainer = Trainer( model, args_enhancers, train_dataset= tokenized_datasets_train_enhancers, eval_dataset= tokenized_datasets_validation_enhancers, tokenizer=tokenizer, compute_metrics=compute_metrics_mcc, )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>train_results = trainer.train()<jupyter_output>/usr/local/lib/python3.10/dist-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning warnings.warn(<jupyter_text>As with the first task, the time can be greatly reduced by increasing the batch size. **Validation MCC score**<jupyter_code>curve_evaluation_mcc_score=[[a['step'],a['eval_mcc_score']] for a in trainer.state.log_history if 'eval_mcc_score' in a.keys()] eval_mcc_score = [c[1] for c in curve_evaluation_mcc_score] steps = [c[0] for c in curve_evaluation_mcc_score] plt.plot(steps, eval_mcc_score, 'b', label='Validation MCC score') plt.title('Validation MCC score for enhancer prediction') plt.xlabel('Number of training steps performed') plt.ylabel('Validation MCC score') plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>**MCC on the test dataset**<jupyter_code># Compute the MCC score on the test dataset : print(f"MCC score on the test dataset: {trainer.predict(tokenized_datasets_test_enhancers).metrics['test_mcc_score']}")<jupyter_output><empty_output>
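To close the loop, the sketch below (an illustrative addition, not part of the original notebook) runs the fine-tuned enhancer classifier on a single new sequence; it reuses the `model`, `tokenizer` and `device` objects from the cells above, and the example DNA string is arbitrary.

```python
import torch

# Classify one (arbitrary) DNA sequence with the fine-tuned model.
sequence = "ATTCTGGCACGTAGCTAGCTTGACGTCATCGATCGTACGTAGCTAGCTAGCTAACGT"
inputs = tokenizer(sequence, return_tensors="pt").to(device)

model.eval()
with torch.no_grad():
    logits = model(**inputs).logits

probabilities = torch.nn.functional.softmax(logits, dim=-1)[0]
predicted_class = int(probabilities.argmax())
print(f"Predicted class: {predicted_class} (probabilities: {probabilities.tolist()})")
```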
notebooks/examples/nucleotide_transformer_dna_sequence_modelling.ipynb/0
{ "file_path": "notebooks/examples/nucleotide_transformer_dna_sequence_modelling.ipynb", "repo_id": "notebooks", "token_count": 6637 }
150
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install ๐Ÿค— Transformers and ๐Ÿค— Datasets as well as other dependencies. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets evaluate transformers rouge-score nltk<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("summarization_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a summarization task In this notebook, we will see how to fine-tune one of the [๐Ÿค— Transformers](https://github.com/huggingface/transformers) model for a summarization task. We will use the [XSum dataset](https://arxiv.org/pdf/1808.08745.pdf) (for extreme summarization) which contains BBC articles accompanied with single-sentence summaries.We will see how to easily load the dataset for this task using ๐Ÿค— Datasets and how to fine-tune a model on it using the `Trainer` API.<jupyter_code>model_checkpoint = "t5-small"<jupyter_output><empty_output><jupyter_text>This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`t5-small`](https://huggingface.co/t5-small) checkpoint. Loading the dataset We will use the [๐Ÿค— Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset from evaluate import load raw_datasets = load_dataset("xsum") metric = load("rouge")<jupyter_output>2023-06-06 12:43:46.062773: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. 
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. Found cached dataset xsum (/home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71)<jupyter_text>The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set:<jupyter_code>raw_datasets<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>raw_datasets["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=5): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, datasets.ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(raw_datasets["train"])<jupyter_output><empty_output><jupyter_text>The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Metric):<jupyter_code>metric<jupyter_output><empty_output><jupyter_text>You can call its `compute` method with your predictions and labels, which need to be list of decoded strings:<jupyter_code>fake_preds = ["hello there", "general kenobi"] fake_labels = ["hello there", "general kenobi"] metric.compute(predictions=fake_preds, references=fake_labels)<jupyter_output><empty_output><jupyter_text>Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a ๐Ÿค— Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that the model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>By default, the call above will use one of the fast tokenizers (backed by Rust) from the ๐Ÿค— Tokenizers library. You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this one sentence!")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. 
They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. Instead of one sentence, we can pass along a list of sentences:<jupyter_code>tokenizer(["Hello, this one sentence!", "This is another sentence."])<jupyter_output><empty_output><jupyter_text>To prepare the targets for our model, we need to tokenize them using the `text_target` parameter. This will make sure the tokenizer uses the special tokens corresponding to the targets:<jupyter_code>print(tokenizer(text_target=["Hello, this one sentence!", "This is another sentence."]))<jupyter_output>{'input_ids': [[8774, 6, 48, 80, 7142, 55, 1], [100, 19, 430, 7142, 5, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]}<jupyter_text>If you are using one of the five T5 checkpoints, we have to prefix the inputs with "summarize:" (the model can also translate, and it needs the prefix to know which task it has to perform).<jupyter_code>if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
    prefix = "summarize: "
else:
    prefix = ""<jupyter_output><empty_output><jupyter_text>We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer than what the selected model can handle will be truncated to the maximum length accepted by the model. The padding will be dealt with later on (in a data collator), so we pad examples to the longest length in the batch and not the whole dataset.<jupyter_code>max_input_length = 1024
max_target_length = 128

def preprocess_function(examples):
    inputs = [prefix + doc for doc in examples["document"]]
    model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)

    # Set up the tokenizer for the targets
    labels = tokenizer(text_target=examples["summary"], max_length=max_target_length, truncation=True)

    model_inputs["labels"] = labels["input_ids"]
    return model_inputs<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>preprocess_function(raw_datasets['train'][:2])<jupyter_output><empty_output><jupyter_text>To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier.
This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71/cache-c29884c8f95fd0b3.arrow WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71/cache-c7c633359ea6092f.arrow WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/sudolife/.cache/huggingface/datasets/xsum/default/1.2.0/082863bf4754ee058a5b6f6525d0cb2b18eadb62c7b370b095d1364050a52b71/cache-e1e5d997fb514ab9.arrow<jupyter_text>Even better, the results are automatically cached by the ๐Ÿค— Datasets library to avoid spending time on this step the next time you run your notebook. The ๐Ÿค— Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. ๐Ÿค— Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `AutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.<jupyter_code>from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case. To instantiate a `Seq2SeqTrainer`, we will need to define three more things. The most important is the [`Seq2SeqTrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.Seq2SeqTrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>batch_size = 16 model_name = model_checkpoint.split("/")[-1] args = Seq2SeqTrainingArguments( f"{model_name}-finetuned-xsum", evaluation_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, weight_decay=0.01, save_total_limit=3, num_train_epochs=1, predict_with_generate=True, fp16=True, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the cell and customize the weight decay. 
Since the `Seq2SeqTrainer` will save the model regularly and our dataset is quite large, we tell it to make three saves maximum. Lastly, we use the `predict_with_generate` option (to properly generate summaries) and activate mixed precision training (to go a bit faster).The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/t5-finetuned-xsum"` or `"huggingface/t5-finetuned-xsum"`).Then, we need a special kind of data collator, which will not only pad the inputs to the maximum length in the batch, but also the labels:<jupyter_code>data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)<jupyter_output><empty_output><jupyter_text>The last thing to define for our `Seq2SeqTrainer` is how to compute the metrics from the predictions. We need to define a function for this, which will just use the `metric` we loaded earlier, and we have to do a bit of pre-processing to decode the predictions into texts:<jupyter_code>import nltk import numpy as np def compute_metrics(eval_pred): predictions, labels = eval_pred decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) # Replace -100 in the labels as we can't decode them. labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Rouge expects a newline after each sentence decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds] decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels] # Note that other metrics may not have a `use_aggregator` parameter # and thus will return a list, computing a metric for each sentence. result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True, use_aggregator=True) # Extract a few results result = {key: value * 100 for key, value in result.items()} # Add mean generated length prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions] result["gen_len"] = np.mean(prediction_lens) return {k: round(v, 4) for k, v in result.items()}<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Seq2SeqTrainer`:<jupyter_code>trainer = Seq2SeqTrainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
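To sanity-check the fine-tuned checkpoint, a short generation sketch such as the one below could be used. It is not part of the original notebook and simply reuses the `tokenizer`, `model`, `prefix`, `max_input_length`, and `max_target_length` objects defined above; the example document is a hypothetical placeholder.

```python
# Hypothetical example document; any XSum-style article text would do.
document = "The council has approved plans for a new cycle path along the river, with work expected to start next spring."

inputs = tokenizer(prefix + document, return_tensors="pt", max_length=max_input_length, truncation=True)
inputs = {k: v.to(model.device) for k, v in inputs.items()}

# Generate a one-sentence abstractive summary with the fine-tuned seq2seq model.
summary_ids = model.generate(**inputs, max_length=max_target_length, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```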
notebooks/examples/summarization.ipynb/0
{ "file_path": "notebooks/examples/summarization.ipynb", "repo_id": "notebooks", "token_count": 5127 }
151
<jupyter_start><jupyter_text>Fine-tuning for Video Classification with ๐Ÿค— TransformersThis notebook shows how to fine-tune a pre-trained Vision model for Video Classification on a custom dataset. The idea is to add a randomly initialized classification head on top of a pre-trained encoder and fine-tune the model altogether on a labeled dataset. DatasetThis notebook uses a subset of the [UCF-101 dataset](https://www.crcv.ucf.edu/data/UCF101.php). We'll be using a subset of the dataset to keep the runtime of the tutorial short. The subset was prepared using [this notebook](https://drive.google.com/file/d/1tTScjnyiKrBz84jKe1H_hPGGXffAZuxX/view?usp=sharing) following [this guide](https://www.tensorflow.org/tutorials/load_data/video). ModelWe'll fine-tune the [VideoMAE model](https://huggingface.co/docs/transformers/model_doc/videomae), which was pre-trained on the [Kinetics 400 dataset](https://www.deepmind.com/open-source/kinetics). You can find the other variants of VideoMAE available on ๐Ÿค— Hub [here](https://huggingface.co/models?search=videomae). You can also extend this notebook to use other video models such as [X-CLIP](https://huggingface.co/docs/transformers/model_doc/xcliptransformers.XCLIPVisionModel). **Note** that for models where there's no classification head already available you'll have to manually attach it (randomly initialized). But this is not the case for VideoMAE since we already have a [`VideoMAEForVideoClassification`](https://huggingface.co/docs/transformers/model_doc/xcliptransformers.XCLIPVisionModel) class. Data preprocessingThis notebook leverages [TorchVision's](https://pytorch.org/vision/stable/transforms.html) and [PyTorchVideo's](https://pytorchvideo.org/) transforms for applying data preprocessing transformations including data augmentation.---Depending on the model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. 
Set those two parameters, then the rest of the notebook should run smoothly.<jupyter_code>model_ckpt = "MCG-NJU/videomae-base" # pre-trained model from which to fine-tune batch_size = 8 # batch size for training and evaluation<jupyter_output><empty_output><jupyter_text>Before we start, let's install the `pytorchvideo`, `transformers`, and `evaluate` libraries.<jupyter_code>!pip install pytorchvideo transformers evaluate -q<jupyter_output> |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 132 kB 4.9 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 5.5 MB 51.0 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 72 kB 1.6 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 50 kB 7.3 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 30.7 MB 1.2 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 42 kB 1.1 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 7.6 MB 48.7 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 163 kB 89.3 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 115 kB 85.3 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 441 kB 85.1 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 212 kB 53.0 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 95 kB 5.6 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 127 kB 88.8 MB/s  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 115 kB 86.0 MB/s [?25h Building wheel for pytorchvideo (setup.py) ... [?25l[?25hdone Building wheel for fvcore (setup.py) ... [?25l[?25hdone Building wheel for io[...]<jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your token:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output>Login successful Your token has been saved to /root/.huggingface/token<jupyter_text>Then you need to install Git-LFS to upload your model checkpoints:<jupyter_code>!git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. 
We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("video_classification_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a video classification task In this notebook, we will see how to fine-tune one of the [๐Ÿค— Transformers](https://github.com/huggingface/transformers) vision models on a Video Classification dataset.Given a video, the goal is to predict an appropriate class for it, like "archery". Loading the dataset Here we first download the subset archive and un-archive it.<jupyter_code>from huggingface_hub import hf_hub_download hf_dataset_identifier = "sayakpaul/ucf101-subset" filename = "UCF101_subset.tar.gz" file_path = hf_hub_download( repo_id=hf_dataset_identifier, filename=filename, repo_type="dataset" ) !tar xf {file_path}<jupyter_output><empty_output><jupyter_text>Now, let's investigate what is inside the archive.<jupyter_code>dataset_root_path = "UCF101_subset" !find {dataset_root_path} | head -5<jupyter_output>UCF101_subset UCF101_subset/val UCF101_subset/val/BasketballDunk UCF101_subset/val/BasketballDunk/v_BasketballDunk_g05_c05.avi UCF101_subset/val/BasketballDunk/UCF101<jupyter_text>Broadly, `dataset_root_path` is organized like so:```bashUCF101_subset/ train/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... val/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... test/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ...``` Let's now count the number of total videos we have.<jupyter_code>import pathlib dataset_root_path = pathlib.Path(dataset_root_path) video_count_train = len(list(dataset_root_path.glob("train/*/*.avi"))) video_count_val = len(list(dataset_root_path.glob("val/*/*.avi"))) video_count_test = len(list(dataset_root_path.glob("test/*/*.avi"))) video_total = video_count_train + video_count_val + video_count_test print(f"Total videos: {video_total}") all_video_file_paths = ( list(dataset_root_path.glob("train/*/*.avi")) + list(dataset_root_path.glob("val/*/*.avi")) + list(dataset_root_path.glob("test/*/*.avi")) ) all_video_file_paths[:5]<jupyter_output><empty_output><jupyter_text>The video paths, when `sorted`, appear like so:```py...'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c04.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c06.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c02.avi','UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c06.avi'... ```We notice that there are video clips belonging to the same group / scene where group is denoted by `g` in the video file paths. `v_ApplyEyeMakeup_g07_c04.avi` and `v_ApplyEyeMakeup_g07_c06.avi`, for example. For the validation and evaluation splits, we wouldn't want to have video clips from the same group / scene to prevent [data leakage](https://www.kaggle.com/code/alexisbcook/data-leakage). The subset that we're using in this tutorial takes this information into account. Next up, we derive the set of labels we have in the dataset. 
Let's also create two dictionaries that'll be helpful when initializing the model:* `label2id`: maps the class names to integers.* `id2label`: maps the integers to class names.<jupyter_code>class_labels = sorted({str(path).split("/")[2] for path in all_video_file_paths}) label2id = {label: i for i, label in enumerate(class_labels)} id2label = {i: label for label, i in label2id.items()} print(f"Unique classes: {list(label2id.keys())}.")<jupyter_output>Unique classes: ['ApplyEyeMakeup', 'ApplyLipstick', 'Archery', 'BabyCrawling', 'BalanceBeam', 'BandMarching', 'BaseballPitch', 'Basketball', 'BasketballDunk', 'BenchPress'].<jupyter_text>We've got 10 unique classes. For each class we have 30 videos in the training set. Loading the model In the next cell, we initialize a video classification model where the encoder is initialized with the pre-trained parameters and the classification head is randomly initialized. We also initialize the feature extractor associated to the model. This will come in handy during writing the preprocessing pipeline for our dataset.<jupyter_code>from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification image_processor = VideoMAEImageProcessor.from_pretrained(model_ckpt) model = VideoMAEForVideoClassification.from_pretrained( model_ckpt, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint )<jupyter_output>The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.<jupyter_text>The warning is telling us we are throwing away some weights (e.g. the weights and bias of the `classifier` layer) and randomly initializing some other (the weights and bias of a new `classifier` layer). This is expected in this case, because we are adding a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. **Note** that [this checkpoint](https://huggingface.co/MCG-NJU/videomae-base-finetuned-kinetics) leads to better performance on this task as the checkpoint was obtained fine-tuning on a similar downstream task having considerable domain overlap. You can check out [this checkpoint](https://huggingface.co/sayakpaul/videomae-base-finetuned-kinetics-finetuned-ucf101-subset) which was obtained by fine-tuning `MCG-NJU/videomae-base-finetuned-kinetics` and it obtains much better performance. Constructing the datasets for training For preprocessing the videos, we'll leverage the [PyTorch Video library](https://pytorchvideo.org/). We start by importing the dependencies we need.<jupyter_code>import pytorchvideo.data from pytorchvideo.transforms import ( ApplyTransformToKey, Normalize, RandomShortSideScale, RemoveKey, ShortSideScale, UniformTemporalSubsample, ) from torchvision.transforms import ( Compose, Lambda, RandomCrop, RandomHorizontalFlip, Resize, )<jupyter_output><empty_output><jupyter_text>For the training dataset transformations, we use a combination of uniform temporal subsampling, pixel normalization, random cropping, and random horizontal flipping. For the validation and evaluation dataset transformations, we keep the transformation chain the same except for random cropping and horizontal flipping. 
To learn more about the details of these transformations check out the [official documentation of PyTorch Video](https://pytorchvideo.org). We'll use the `image_processor` associated with the pre-trained model to obtain the following information:* Image mean and standard deviation with which the video frame pixels will be normalized.* Spatial resolution to which the video frames will be resized.<jupyter_code>import os mean = image_processor.image_mean std = image_processor.image_std if "shortest_edge" in image_processor.size: height = width = image_processor.size["shortest_edge"] else: height = image_processor.size["height"] width = image_processor.size["width"] resize_to = (height, width) num_frames_to_sample = model.config.num_frames sample_rate = 4 fps = 30 clip_duration = num_frames_to_sample * sample_rate / fps # Training dataset transformations. train_transform = Compose( [ ApplyTransformToKey( key="video", transform=Compose( [ UniformTemporalSubsample(num_frames_to_sample), Lambda(lambda x: x / 255.0), Normalize(mean, std), RandomShortSideScale(min_size=256, max_size=320), RandomCrop(resize_to), RandomHorizontalFlip(p=0.5), ] ), ), ] ) # Training dataset. train_dataset = pytorchvideo.data.Ucf101( data_path=os.path.join(dataset_root_path, "train"), clip_sampler=pytorchvideo.data.make_clip_sampler("random", clip_duration), decode_audio=False, transform=train_transform, ) # Validation and evaluation datasets' transformations. val_transform = Compose( [ ApplyTransformToKey( key="video", transform=Compose( [ UniformTemporalSubsample(num_frames_to_sample), Lambda(lambda x: x / 255.0), Normalize(mean, std), Resize(resize_to), ] ), ), ] ) # Validation and evaluation datasets. val_dataset = pytorchvideo.data.Ucf101( data_path=os.path.join(dataset_root_path, "val"), clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), decode_audio=False, transform=val_transform, ) test_dataset = pytorchvideo.data.Ucf101( data_path=os.path.join(dataset_root_path, "test"), clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), decode_audio=False, transform=val_transform, )<jupyter_output><empty_output><jupyter_text>**Note**: The above dataset pipelines are taken from the [official PyTorch Video example](https://pytorchvideo.org/docs/tutorial_classificationdataset). We're using the [`pytorchvideo.data.Ucf101()`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.htmlpytorchvideo.data.Ucf101) function because it's tailored for the UCF-101 dataset. Under the hood, it returns a [`pytorchvideo.data.labeled_video_dataset.LabeledVideoDataset`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.htmlpytorchvideo.data.LabeledVideoDataset) object. `LabeledVideoDataset` class is the base class for all things video in the PyTorch Video dataset. So, if you wanted to use a custom dataset not supported off-the-shelf by PyTorch Video, you can extend the `LabeledVideoDataset` class accordingly. Refer to the `data` API [documentation to](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html) learn more. Also, if your dataset follows a similar structure (as shown above), then using the `pytorchvideo.data.Ucf101()` should work just fine.<jupyter_code># We can access the `num_videos` argument to know the number of videos we have in the # dataset. 
train_dataset.num_videos, val_dataset.num_videos, test_dataset.num_videos<jupyter_output><empty_output><jupyter_text>Let's now take a preprocessed video from the dataset and investigate it.<jupyter_code>sample_video = next(iter(train_dataset)) sample_video.keys() def investigate_video(sample_video): """Utility to investigate the keys present in a single video sample.""" for k in sample_video: if k == "video": print(k, sample_video["video"].shape) else: print(k, sample_video[k]) print(f"Video label: {id2label[sample_video[k]]}") investigate_video(sample_video)<jupyter_output>video torch.Size([3, 16, 224, 224]) video_name v_Basketball_g01_c01.avi video_index 210 clip_index 0 aug_index 0 label 7 Video label: Basketball<jupyter_text>We can also visualize the preprocessed videos for easier debugging.<jupyter_code>import imageio import numpy as np from IPython.display import Image def unnormalize_img(img): """Un-normalizes the image pixels.""" img = (img * std) + mean img = (img * 255).astype("uint8") return img.clip(0, 255) def create_gif(video_tensor, filename="sample.gif"): """Prepares a GIF from a video tensor. The video tensor is expected to have the following shape: (num_frames, num_channels, height, width). """ frames = [] for video_frame in video_tensor: frame_unnormalized = unnormalize_img(video_frame.permute(1, 2, 0).numpy()) frames.append(frame_unnormalized) kargs = {"duration": 0.25} imageio.mimsave(filename, frames, "GIF", **kargs) return filename def display_gif(video_tensor, gif_name="sample.gif"): """Prepares and displays a GIF from a video tensor.""" video_tensor = video_tensor.permute(1, 0, 2, 3) gif_filename = create_gif(video_tensor, gif_name) return Image(filename=gif_filename) video_tensor = sample_video["video"] display_gif(video_tensor)<jupyter_output><empty_output><jupyter_text>Training the model We'll leverage [`Trainer`](https://huggingface.co/docs/transformers/main_classes/trainer) from ๐Ÿค— Transformers for training the model. To instantiate a `Trainer`, we will need to define the training configuration and an evaluation metric. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to configure the training. It requires an output folder name, which will be used to save the checkpoints of the model. It also helps sync all the information in the model repository on ๐Ÿค— Hub.Most of the training arguments are pretty self-explanatory, but one that is quite important here is `remove_unused_columns=False`. This one will drop any features not used by the model's call function. By default it's `True` because usually it's ideal to drop unused feature columns, making it easier to unpack inputs into the model's call function. 
But, in our case, we need the unused features ('video' in particular) in order to create `pixel_values` (which is a mandatory key our model expects in its inputs).<jupyter_code>from transformers import TrainingArguments, Trainer model_name = model_ckpt.split("/")[-1] new_model_name = f"{model_name}-finetuned-ucf101-subset" num_epochs = 4 args = TrainingArguments( new_model_name, remove_unused_columns=False, evaluation_strategy="epoch", save_strategy="epoch", learning_rate=5e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, warmup_ratio=0.1, logging_steps=10, load_best_model_at_end=True, metric_for_best_model="accuracy", push_to_hub=True, max_steps=(train_dataset.num_videos // batch_size) * num_epochs, )<jupyter_output><empty_output><jupyter_text>There's no need to define `max_steps` when instantiating `TrainingArguments`. Since the dataset returned by `pytorchvideo.data.Ucf101()` doesn't implement the `__len__()` method we had to specify `max_steps`. Next, we need to define a function for how to compute the metrics from the predictions, which will just use the `metric` we'll load now. The only preprocessing we have to do is to take the argmax of our predicted logits:<jupyter_code>import evaluate metric = evaluate.load("accuracy") # the compute_metrics function takes a Named Tuple as input: # predictions, which are the logits of the model as Numpy arrays, # and label_ids, which are the ground-truth labels as Numpy arrays. def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions.""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>**A note on evaluation**:In the [VideoMAE paper](https://arxiv.org/abs/2203.12602), the authors use the following evaluation strategy. They evaluate the model on several clips from test videos and apply different crops to those clips and report the aggregate score. However, in the interest of simplicity and brevity, we don't consider that in this tutorial. We also define a `collate_fn`, which will be used to batch examples together.Each batch consists of 2 keys, namely `pixel_values` and `labels`.<jupyter_code>import torch def collate_fn(examples): """The collation function to be used by `Trainer` to prepare data batches.""" # permute to (num_frames, num_channels, height, width) pixel_values = torch.stack( [example["video"].permute(1, 0, 2, 3) for example in examples] ) labels = torch.tensor([example["label"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels}<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=image_processor, compute_metrics=compute_metrics, data_collator=collate_fn, )<jupyter_output>Cloning https://huggingface.co/sayakpaul/videomae-base-finetuned-ucf101-subset into local empty directory. WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/videomae-base-finetuned-ucf101-subset into local empty directory.<jupyter_text>You might wonder why we pass along the `image_processor` as a tokenizer when we already preprocessed our data. This is only to make sure the feature extractor configuration file (stored as JSON) will also be uploaded to the repo on the hub. 
Now we can finetune our model by calling the `train` method:<jupyter_code>train_results = trainer.train()<jupyter_output>/usr/local/lib/python3.7/dist-packages/transformers/optimization.py:310: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning FutureWarning, ***** Running training ***** Num examples = 1184 Num Epochs = 9223372036854775807 Instantaneous batch size per device = 8 Total train batch size (w. parallel, distributed & accumulation) = 8 Gradient Accumulation steps = 1 Total optimization steps = 148 Number of trainable parameters = 86234890<jupyter_text>We can check with the `evaluate` method that our `Trainer` did reload the best model properly (if it was not the last one):<jupyter_code>trainer.evaluate(test_dataset) trainer.save_model() test_results = trainer.evaluate(test_dataset) trainer.log_metrics("test", test_results) trainer.save_metrics("test", test_results) trainer.save_state()<jupyter_output>Saving model checkpoint to videomae-base-finetuned-ucf101-subset Configuration saved in videomae-base-finetuned-ucf101-subset/config.json Model weights saved in videomae-base-finetuned-ucf101-subset/pytorch_model.bin Feature extractor saved in videomae-base-finetuned-ucf101-subset/preprocessor_config.json Saving model checkpoint to videomae-base-finetuned-ucf101-subset Configuration saved in videomae-base-finetuned-ucf101-subset/config.json Model weights saved in videomae-base-finetuned-ucf101-subset/pytorch_model.bin Feature extractor saved in videomae-base-finetuned-ucf101-subset/preprocessor_config.json Several commits (2) will be pushed upstream. WARNING:huggingface_hub.repository:Several commits (2) will be pushed upstream. The progress bars may be unreliable. WARNING:huggingface_hub.repository:The progress bars may be unreliable.<jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction (note that the Trainer will automatically create a model card as well as Tensorboard logs - see the "Training metrics" tab - amazing isn't it?):<jupyter_code>trainer.push_to_hub()<jupyter_output>Saving model checkpoint to videomae-base-finetuned-ucf101-subset Configuration saved in videomae-base-finetuned-ucf101-subset/config.json Model weights saved in videomae-base-finetuned-ucf101-subset/pytorch_model.bin Feature extractor saved in videomae-base-finetuned-ucf101-subset/preprocessor_config.json<jupyter_text>Now that our model is trained, let's use it to run inference on a video from `test_dataset`. Inference Let's load the trained model checkpoint and fetch a video from `test_dataset`.<jupyter_code>trained_model = VideoMAEForVideoClassification.from_pretrained(new_model_name) sample_test_video = next(iter(test_dataset)) investigate_video(sample_test_video)<jupyter_output>video torch.Size([3, 16, 224, 224]) video_name v_BasketballDunk_g12_c05.avi video_index 62 clip_index 0 aug_index 0 label 8 Video label: BasketballDunk<jupyter_text>We then prepare the video as a `torch.Tensor` and run inference.<jupyter_code>def run_inference(model, video): """Utility to run inference given a model and test video. The video is assumed to be preprocessed already. 
""" # (num_frames, num_channels, height, width) perumuted_sample_test_video = video.permute(1, 0, 2, 3) inputs = { "pixel_values": perumuted_sample_test_video.unsqueeze(0), "labels": torch.tensor( [sample_test_video["label"]] ), # this can be skipped if you don't have labels available. } device = torch.device("cuda" if torch.cuda.is_available() else "cpu") inputs = {k: v.to(device) for k, v in inputs.items()} model = model.to(device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits return logits logits = run_inference(trained_model, sample_test_video["video"])<jupyter_output><empty_output><jupyter_text>We can now check if the model got the prediction right.<jupyter_code>display_gif(sample_test_video["video"]) predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx])<jupyter_output>Predicted class: BasketballDunk
notebooks/examples/video_classification.ipynb/0
{ "file_path": "notebooks/examples/video_classification.ipynb", "repo_id": "notebooks", "token_count": 8881 }
152
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - training with custom metrics Binary Classification with `Trainer` and `imdb` dataset In this demo, we extend the basic classification demo by adding **metrics definition** to capture and visualize training metrics.The documentation of the SageMaker metrics capture feature can be seen at https://docs.aws.amazon.com/sagemaker/latest/dg/training-metrics.htmlWe additionally use **SageMaker Checkpointing** to send intermediary checkpoint data to S3 uncompressed in parallel to the training happening https://docs.aws.amazon.com/sagemaker/latest/dg/model-checkpoints.htmlSageMaker Checkpointing is supported by HF Trainer after Transformers 4.4.0+ Import libraries and set environment_*Note:* we only install the required libraries from Hugging Face and AWS. You also need PyTorch or Tensorflow, if you havenยดt it installed_<jupyter_code>!pip install "sagemaker>=2.140.0" "transformers==4.26.1" "datasets[s3]==2.10.1" --upgrade<jupyter_output><empty_output><jupyter_text>Development environment<jupyter_code>import sagemaker.huggingface<jupyter_output><empty_output><jupyter_text>Permissions_If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>PreprocessingWe are using the `datasets` library to download and preprocess the `imdb` dataset. After preprocessing, the dataset will be uploaded to our `sagemaker_session_bucket` to be used within our training job. The [imdb](http://ai.stanford.edu/~amaas/data/sentiment/) dataset consists of 25000 training and 25000 testing highly polar movie reviews. 
Tokenization<jupyter_code>from datasets import load_dataset from transformers import AutoTokenizer # tokenizer used in preprocessing tokenizer_name = 'distilbert-base-uncased' # dataset used dataset_name = 'imdb' # s3 key prefix for the data s3_prefix = 'samples/datasets/imdb' # load dataset dataset = load_dataset(dataset_name) # download tokenizer tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) # tokenizer helper function def tokenize(batch): return tokenizer(batch['text'], padding='max_length', truncation=True) # load dataset train_dataset, test_dataset = load_dataset('imdb', split=['train', 'test']) test_dataset = test_dataset.shuffle().select(range(10000)) # smaller the size for test dataset to 10k # tokenize dataset train_dataset = train_dataset.map(tokenize, batched=True) test_dataset = test_dataset.map(tokenize, batched=True) # set format for pytorch train_dataset = train_dataset.rename_column("label", "labels") train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels']) test_dataset = test_dataset.rename_column("label", "labels") test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])<jupyter_output><empty_output><jupyter_text>Uploading data to Amazon S3After we processed the `datasets` we are going to use the new `FileSystem` [integration](https://huggingface.co/docs/datasets/filesystems.html) to upload our dataset to S3.<jupyter_code># save train_dataset to s3 training_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/train' train_dataset.save_to_disk(training_input_path) # save test_dataset to s3 test_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/test' test_dataset.save_to_disk(test_input_path)<jupyter_output><empty_output><jupyter_text>Launching a Training Job with custom metrics<jupyter_code>from sagemaker.huggingface import HuggingFace # hyperparameters, which will be passed into the training job hyperparameters={ 'epochs': 3, 'train_batch_size': 32, 'checkpoints': '/opt/ml/checkpoints/', 'model_name':'distilbert-base-uncased'}<jupyter_output><empty_output><jupyter_text>We create a metric_definition dictionary that contains regex-based definitions that will be used to parse the job logs and extract metrics<jupyter_code>metric_definitions=[ {'Name': 'loss', 'Regex': "'loss': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'learning_rate', 'Regex': "'learning_rate': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_loss', 'Regex': "'eval_loss': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_accuracy', 'Regex': "'eval_accuracy': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_f1', 'Regex': "'eval_f1': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_precision', 'Regex': "'eval_precision': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_recall', 'Regex': "'eval_recall': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_runtime', 'Regex': "'eval_runtime': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_samples_per_second', 'Regex': "'eval_samples_per_second': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'epoch', 'Regex': "'epoch': ([0-9]+(.|e\-)[0-9]+),?"}] huggingface_estimator = HuggingFace( entry_point='train.py', source_dir='./scripts', instance_type='ml.p3.2xlarge', instance_count=1, transformers_version='4.26', pytorch_version='1.13', py_version='py39', role=role, hyperparameters=hyperparameters, metric_definitions=metric_definitions) # starting the train job with our uploaded datasets as input huggingface_estimator.fit({'train': training_input_path, 'test': test_input_path})<jupyter_output><empty_output><jupyter_text>Accessing Training MetricsThe training job 
doesn't emit metrics immediately. For example, it first needs to provision a training instance, download the training image, download the data. Additionally in this demo the first evaluation logs come after 500 steps (default in the Hugging Face trainer https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments).Hence, **run the below section 15 to 20 minutes after launching the training, otherwise it may not have available metrics yet and return an error**Note that you can also copy this code and run it from a different place (as long as connected to the cloud and authorized to use the API), by specifiying the exact training job name in the `TrainingJobAnalytics` API call.)<jupyter_code>from sagemaker import TrainingJobAnalytics # Captured metrics can be accessed as a Pandas dataframe df = TrainingJobAnalytics(training_job_name=huggingface_estimator.latest_training_job.name).dataframe() df.head(10)<jupyter_output>INFO:botocore.credentials:Found credentials from IAM Role: BaseNotebookInstanceEc2InstanceRole<jupyter_text>We can also plot some of the metrics collected*Note: the plot below were generated at the end of the training job, with metrics available for all training duration*<jupyter_code>!pip install seaborn from matplotlib import pyplot as plt import seaborn as sns plt.rcParams['figure.figsize'] = [15,5] evals = df[df.metric_name.isin(['eval_accuracy','eval_precision'])] losses = df[df.metric_name.isin(['loss', 'eval_loss'])] sns.lineplot( x='timestamp', y='value', data=evals, hue='metric_name', palette=['blue', 'purple']) ax2 = plt.twinx() sns.lineplot( x='timestamp', y='value', data=losses, hue='metric_name', palette=['orange', 'red'], ax=ax2)<jupyter_output><empty_output><jupyter_text>Deploying the endpointTo deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type.<jupyter_code>predictor = huggingface_estimator.deploy(1,"ml.g4dn.xlarge")<jupyter_output><empty_output><jupyter_text>Then, we use the returned predictor object to call the endpoint.<jupyter_code>sentiment_input= {"inputs":"I love using the new Inference DLC."} predictor.predict(sentiment_input)<jupyter_output><empty_output><jupyter_text>Finally, we delete the endpoint again.<jupyter_code>predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output>
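For completeness: before the endpoint is deleted, the same prediction request could also be sent without the SageMaker Python SDK, through the low-level `sagemaker-runtime` API. This is only a sketch and reuses `predictor.endpoint_name` from above.

```python
import json
import boto3

runtime = boto3.client("sagemaker-runtime")

response = runtime.invoke_endpoint(
    EndpointName=predictor.endpoint_name,
    ContentType="application/json",
    Body=json.dumps({"inputs": "I love using the new Inference DLC."}),
)

# The Hugging Face inference container returns a JSON payload with label and score.
print(json.loads(response["Body"].read().decode("utf-8")))
```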
notebooks/sagemaker/06_sagemaker_metrics/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/06_sagemaker_metrics/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 3000 }
153
<jupyter_start><jupyter_text>Going Production: Auto-scale Hugging Face Transformer Endpoints with Amazon SageMaker Welcome to this getting-started guide. We will use the new Hugging Face Inference DLCs and the Amazon SageMaker Python SDK to deploy a transformer model for real-time inference. In this example we are going to deploy a trained Hugging Face Transformer model to SageMaker for inference.<jupyter_code>!pip install "sagemaker>=2.66.2" --upgrade<jupyter_output><empty_output><jupyter_text>Deploy one of the 15,000+ Hugging Face Transformers to Amazon SageMaker for Inference To deploy a model directly from the Hub to SageMaker, we need to define 2 environment variables when creating the `HuggingFaceModel`. We need to define:- `HF_MODEL_ID`: defines the model id, which will be automatically loaded from [huggingface.co/models](http://huggingface.co/models) when creating our SageMaker Endpoint. The ๐Ÿค— Hub provides 10,000+ models, all available through this environment variable.- `HF_TASK`: defines the task for the used ๐Ÿค— Transformers pipeline. A full list of tasks can be found [here](https://huggingface.co/transformers/main_classes/pipelines.html).<jupyter_code>import sagemaker
import boto3

try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

print(f"sagemaker role arn: {role}")

from sagemaker.huggingface import HuggingFaceModel
from uuid import uuid4

# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'yiyanghkust/finbert-tone',  # model_id from hf.co/models
    'HF_TASK': 'text-classification'            # NLP task you want to use for predictions
}

# endpoint name
endpoint_name = f'{hub["HF_MODEL_ID"].split("/")[1]}-{str(uuid4())}'  # model and endpoint name

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    env=hub,
    role=role,                    # iam role with permissions to create an Endpoint
    name=endpoint_name,           # model and endpoint name
    transformers_version="4.26",  # transformers version used
    pytorch_version="1.13",       # pytorch version used
    py_version="py39",            # python version of the DLC
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,
    instance_type="ml.c5.large"
)

# get aws region for dashboards
aws_region = predictor.sagemaker_session.boto_region_name<jupyter_output>-----!<jupyter_text>**Architecture**The [Hugging Face Inference Toolkit for SageMaker](https://github.com/aws/sagemaker-huggingface-inference-toolkit) is an open-source library for serving Hugging Face transformer models on SageMaker. It utilizes the SageMaker Inference Toolkit for starting up the model server, which is responsible for handling inference requests. The SageMaker Inference Toolkit uses [Multi Model Server (MMS)](https://github.com/awslabs/multi-model-server) for serving ML models. It bootstraps MMS with a configuration and settings that make it compatible with SageMaker and allow you to adjust important performance parameters, such as the number of workers per model, depending on the needs of your scenario.**Deploying a model using SageMaker hosting services is a three-step process:**1. **Create a model in SageMaker** โ€”By creating a model, you tell SageMaker where it can find the model components. 2.
**Create an endpoint configuration for an HTTPS endpoint** โ€”You specify the name of one or more models in production variants and the ML compute instances that you want SageMaker to launch to host each production variant.3. **Create an HTTPS endpoint** โ€”Provide the endpoint configuration to SageMaker. The service launches the ML compute instances and deploys the model or models as specified in the configuration<jupyter_code># example request, you always need to define "inputs" data = { "inputs": "There is a shortage of capital for project SageMaker. We need extra financing" } # request predictor.predict(data) for i in range(500): predictor.predict(data)<jupyter_output><empty_output><jupyter_text>Model Monitoring<jupyter_code>print(f"https://console.aws.amazon.com/cloudwatch/home?region={aws_region}#metricsV2:graph=~(metrics~(~(~'AWS*2fSageMaker~'ModelLatency~'EndpointName~'finbert-tone-73d26f97-9376-4b3f-9334-a2-2021-10-29-12-18-52-365~'VariantName~'AllTraffic))~view~'timeSeries~stacked~false~start~'-PT15M~end~'P0D~region~'{aws_region}~stat~'SampleCount~period~30);query=~'*7bAWS*2fSageMaker*2cEndpointName*2cVariantName*7d*20{predictor.endpoint_name}")<jupyter_output><empty_output><jupyter_text>Auto Scaling your Model[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a fully managed service that provides every developer and data scientist with the ability to quickly build, train, and deploy machine learning (ML) models at scale.Autoscaling is an out-of-the-box feature that monitors your workloads and dynamically adjusts the capacity to maintain steady and predictable performance at the possible lowest cost.The following diagram is a sample architecture that showcases how a model is served as a endpoint with autoscaling enabled. Reference Blog post [Configuring autoscaling inference endpoints in Amazon SageMaker](https://aws.amazon.com/de/blogs/machine-learning/configuring-autoscaling-inference-endpoints-in-amazon-sagemaker/) Configure Autoscaling for our EndpointYou can define minimum, desired, and maximum number of instances per endpoint and, based on the autoscaling configurations, instances are managed dynamically. The following diagram illustrates this architecture. AWS offers many different [ways to auto-scale your endpoints](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html). One of them Simple-Scaling, where you scale the instance capacity based on `CPUUtilization` of the instances or `SageMakerVariantInvocationsPerInstance`. In this example we are going to use `SageMakerVariantInvocationsPerInstance` to auto-scale our Endpoint<jupyter_code>import boto3 # Let us define a client to play with autoscaling options asg_client = boto3.client('application-autoscaling') # Common class representing Application Auto Scaling for SageMaker amongst other services # he resource type is variant and the unique identifier is the resource ID. # Example: endpoint/my-bert-fine-tuned/variant/AllTraffic . resource_id=f"endpoint/{predictor.endpoint_name}/variant/AllTraffic" # scaling configuration response = asg_client.register_scalable_target( ServiceNamespace='sagemaker', # ResourceId=resource_id, ScalableDimension='sagemaker:variant:DesiredInstanceCount', MinCapacity=1, MaxCapacity=4 )<jupyter_output><empty_output><jupyter_text>Create Scaling Policy with configuration details, e.g. 
the `TargetValue` at which the endpoint should be scaled.<jupyter_code>response = asg_client.put_scaling_policy(
    PolicyName=f'Request-ScalingPolicy-{predictor.endpoint_name}',
    ServiceNamespace='sagemaker',
    ResourceId=resource_id,
    ScalableDimension='sagemaker:variant:DesiredInstanceCount',
    PolicyType='TargetTrackingScaling',
    TargetTrackingScalingPolicyConfiguration={
        'TargetValue': 10.0,  # Threshold
        'PredefinedMetricSpecification': {
            'PredefinedMetricType': 'SageMakerVariantInvocationsPerInstance',
        },
        'ScaleInCooldown': 300,  # cooldown period (in seconds) after a scale-in activity
        'ScaleOutCooldown': 60  # cooldown period (in seconds) after a scale-out activity
    }
)<jupyter_output><empty_output><jupyter_text>Stress test the endpoint with continuous requests<jupyter_code>import time

request_duration_in_seconds = 4*65
end_time = time.time() + request_duration_in_seconds
print(f"test will run {request_duration_in_seconds} seconds")

while time.time() < end_time:
    predictor.predict(data)<jupyter_output><empty_output><jupyter_text>Monitor the `InvocationsPerInstance` metric in CloudWatch<jupyter_code>print(f"https://console.aws.amazon.com/cloudwatch/home?region={aws_region}#metricsV2:graph=~(metrics~(~(~'AWS*2fSageMaker~'InvocationsPerInstance~'EndpointName~'{predictor.endpoint_name}~'VariantName~'AllTraffic))~view~'timeSeries~stacked~false~region~'{aws_region}~start~'-PT15M~end~'P0D~stat~'SampleCount~period~60);query=~'*7bAWS*2fSageMaker*2cEndpointName*2cVariantName*7d*20{predictor.endpoint_name}")<jupyter_output><empty_output><jupyter_text>Check the endpoint's current instance count<jupyter_code>bt_sm = boto3.client('sagemaker')
response = bt_sm.describe_endpoint(EndpointName=predictor.endpoint_name)
print(f"Endpoint {response['EndpointName']} has \nCurrent Instance Count: {response['ProductionVariants'][0]['CurrentInstanceCount']}\nWith a desired instance count of {response['ProductionVariants'][0]['DesiredInstanceCount']}")<jupyter_output>Endpoint finbert-tone-73d26f97-9376-4b3f-9334-a2-2021-10-29-12-18-52-365 has
Current Instance Count: 4
With a desired instance count of 4<jupyter_text>Clean up<jupyter_code># delete model and endpoint
predictor.delete_model()
predictor.delete_endpoint()<jupyter_output><empty_output>
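<jupyter_text>One possible extension, not part of the original notebook: besides deleting the model and endpoint, you may also want to remove the scaling policy and deregister the scalable target so that no Application Auto Scaling configuration is left behind. The sketch below reuses `asg_client`, `resource_id`, and `predictor` from the cells above and assumes they are still defined.<jupyter_code># optional: also clean up the autoscaling configuration (illustrative sketch)
asg_client.delete_scaling_policy(
    PolicyName=f'Request-ScalingPolicy-{predictor.endpoint_name}',
    ServiceNamespace='sagemaker',
    ResourceId=resource_id,
    ScalableDimension='sagemaker:variant:DesiredInstanceCount',
)
asg_client.deregister_scalable_target(
    ServiceNamespace='sagemaker',
    ResourceId=resource_id,
    ScalableDimension='sagemaker:variant:DesiredInstanceCount',
)<jupyter_output><empty_output>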
notebooks/sagemaker/13_deploy_and_autoscaling_transformers/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/13_deploy_and_autoscaling_transformers/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 2793 }
154
import os
from transformers import AutoConfig, AutoTokenizer
import torch
import torch.neuron

# To use one neuron core per worker
os.environ["NEURON_RT_NUM_CORES"] = "1"

# saved weights name
AWS_NEURON_TRACED_WEIGHTS_NAME = "neuron_model.pt"


def model_fn(model_dir):
    # load tokenizer and neuron model from model_dir
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = torch.jit.load(os.path.join(model_dir, AWS_NEURON_TRACED_WEIGHTS_NAME))
    model_config = AutoConfig.from_pretrained(model_dir)

    return model, tokenizer, model_config


def predict_fn(data, model_tokenizer_model_config):
    # unpack model, tokenizer and model config
    model, tokenizer, model_config = model_tokenizer_model_config

    # create embeddings for the inputs
    inputs = data.pop("inputs", data)
    embeddings = tokenizer(
        inputs,
        return_tensors="pt",
        max_length=model_config.traced_sequence_length,
        padding="max_length",
        truncation=True,
    )
    # convert to tuple for neuron model
    neuron_inputs = tuple(embeddings.values())

    # run prediction
    with torch.no_grad():
        predictions = model(*neuron_inputs)[0]
        scores = torch.nn.Softmax(dim=1)(predictions)

    # return a list of dictionaries, which is JSON serializable
    return [{"label": model_config.id2label[item.argmax().item()], "score": item.max().item()} for item in scores]
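A quick way to sanity-check a handler like this is to call `model_fn` and `predict_fn` directly. The sketch below is not part of the original file: it assumes a hypothetical local folder `./traced_model` that already contains `neuron_model.pt` plus the tokenizer and config files, that the module above is importable as `inference`, and that it runs on an Inferentia instance where `torch.neuron` is available.

```python
# Minimal local smoke test for the handler above (illustrative sketch, not part of the file).
from inference import model_fn, predict_fn

# "./traced_model" is a hypothetical directory with neuron_model.pt, tokenizer files and config.json
model_artifacts = model_fn("./traced_model")

payload = {"inputs": "Inferentia makes low-latency inference affordable."}
predictions = predict_fn(payload, model_artifacts)
print(predictions)  # e.g. [{"label": "Neutral", "score": 0.97}]
```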
notebooks/sagemaker/18_inferentia_inference/code/inference.py/0
{ "file_path": "notebooks/sagemaker/18_inferentia_inference/code/inference.py", "repo_id": "notebooks", "token_count": 519 }
155
import os import argparse from transformers import ( AutoModelForCausalLM, AutoTokenizer, set_seed, default_data_collator, ) from datasets import load_from_disk import torch from transformers import Trainer, TrainingArguments import torch.distributed as dist def safe_save_model_for_hf_trainer(trainer: Trainer, tokenizer: AutoTokenizer, output_dir: str): """Helper method to save model for HF Trainer.""" # see: https://github.com/tatsu-lab/stanford_alpaca/issues/65 from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, FullStateDictConfig, StateDictType, ) model = trainer.model save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, save_policy): cpu_state_dict = model.state_dict() if trainer.args.should_save: trainer._save(output_dir, state_dict=cpu_state_dict) # noqa tokenizer.save_pretrained(output_dir) def parse_arge(): """Parse the arguments.""" parser = argparse.ArgumentParser() # add model id and dataset path argument parser.add_argument( "--model_id", type=str, default="google/flan-t5-xl", help="Model id to use for training.", ) parser.add_argument("--dataset_path", type=str, default="lm_dataset", help="Path to dataset.") # add training hyperparameters for epochs, batch size, learning rate, and seed parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for.") parser.add_argument("--max_steps", type=int, default=None, help="Number of epochs to train for.") parser.add_argument( "--per_device_train_batch_size", type=int, default=1, help="Batch size to use for training.", ) parser.add_argument("--lr", type=float, default=3e-5, help="Learning rate to use for training.") parser.add_argument("--optimizer", type=str, default="adamw_hf", help="Learning rate to use for training.") parser.add_argument("--seed", type=int, default=42, help="Seed to use for training.") parser.add_argument( "--gradient_checkpointing", type=bool, default=True, help="Path to deepspeed config file.", ) parser.add_argument( "--bf16", type=bool, default=True if torch.cuda.get_device_capability()[0] == 8 else False, help="Whether to use bf16.", ) parser.add_argument("--fsdp", type=str, default=None, help="Whether to use fsdp.") parser.add_argument( "--fsdp_transformer_layer_cls_to_wrap", type=str, default=None, help="Which transformer layer to wrap with fsdp.", ) args = parser.parse_known_args() return args def training_function(args): # set seed set_seed(args.seed) dataset = load_from_disk(args.dataset_path) # load model from the hub model = AutoModelForCausalLM.from_pretrained( args.model_id, use_cache=False if args.gradient_checkpointing else True, # this is needed for gradient checkpointing ) tokenizer = AutoTokenizer.from_pretrained(args.model_id) # Define training args output_dir = "/tmp" training_args = TrainingArguments( output_dir=output_dir, overwrite_output_dir=True, per_device_train_batch_size=args.per_device_train_batch_size, bf16=args.bf16, # Use BF16 if available learning_rate=args.lr, num_train_epochs=args.epochs, gradient_checkpointing=args.gradient_checkpointing, # logging strategies logging_dir=f"{output_dir}/logs", logging_strategy="steps", logging_steps=10, save_strategy="no", optim=args.optimizer, ddp_timeout=7200, fsdp=args.fsdp, fsdp_transformer_layer_cls_to_wrap=args.fsdp_transformer_layer_cls_to_wrap, ) # Create Trainer instance trainer = Trainer( model=model, args=training_args, train_dataset=dataset, data_collator=default_data_collator, ) # Start training 
trainer.train() print("Training done!") # save model and tokenizer for easy inference safe_save_model_for_hf_trainer(trainer, tokenizer, "/opt/ml/model/") dist.barrier() def main(): args, _ = parse_arge() training_function(args) if __name__ == "__main__": main()
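A script like this is usually launched as a SageMaker training job rather than executed directly. The sketch below shows one possible launcher using the SageMaker Python SDK's `HuggingFace` estimator with the `torch_distributed` launcher; the instance type, container versions, role, S3 paths, and hyperparameter values are illustrative assumptions, not values taken from the original repository.

```python
# Hypothetical SageMaker launcher for the FSDP training script above (sketch).
from sagemaker.huggingface import HuggingFace

huggingface_estimator = HuggingFace(
    entry_point="run_clm.py",
    source_dir="./scripts",
    instance_type="ml.p4d.24xlarge",   # assumption: any multi-GPU instance type works
    instance_count=1,
    role="<your-sagemaker-execution-role>",  # placeholder
    transformers_version="4.28",       # assumption: pick versions matching an available DLC
    pytorch_version="2.0",
    py_version="py310",
    distribution={"torch_distributed": {"enabled": True}},  # launch the script via torchrun
    hyperparameters={
        "model_id": "meta-llama/Llama-2-7b-hf",
        "dataset_path": "/opt/ml/input/data/training",
        "epochs": 3,
        "per_device_train_batch_size": 1,
        "lr": 2e-5,
        "fsdp": "full_shard auto_wrap",
        "fsdp_transformer_layer_cls_to_wrap": "LlamaDecoderLayer",
    },
)

huggingface_estimator.fit({"training": "s3://<bucket>/processed/dataset"})
```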
notebooks/sagemaker/25_pytorch_fsdp_model_parallelism/scripts/run_clm.py/0
{ "file_path": "notebooks/sagemaker/25_pytorch_fsdp_model_parallelism/scripts/run_clm.py", "repo_id": "notebooks", "token_count": 1807 }
156
import nbformat import os import re import shutil # Paths are set to work by invoking this scrip from the notebooks repo, presuming the transformers repo is in the # same parent folder as the notebooks repo. PATH_TO_DOCS = '../transformers/docs/source' PATH_TO_DEST = 'transformers_doc' DOC_BASE_URL = "https://huggingface.co/transformers/" # These are the doc files converted, add any new tutorial to this list if you want it handled by the conversion # script. TUTORIAL_FILES = [ "benchmarks.rst", "custom_datasets.rst", "multilingual.rst", "perplexity.rst", "preprocessing.rst", "quicktour.rst", "task_summary.rst", "tokenizer_summary.rst", "training.rst" ] ################################### # Parsing the rst file # ################################### # Re pattern that catches markdown titles. _re_title = re.compile(r"^#+\s+(\S+)") # Re pattern that catches rst blocks of the form `.. block_name::`. _re_block = re.compile(r"^\.\.\s+(\S+)::") # Re pattern that catches what's after the :: in rst blocks of the form `.. block_name:: something`. _re_block_lang = re.compile(r"^\.\.\s+\S+::\s*(\S+)(\s+|$)") # Re pattern that catchers section names like `.. _name:`. _re_anchor_section = re.compile(r"^\.\.\s+_(\S+):") # Re pattern that catches indentation at the start of a line. _re_indent = re.compile(r"^(\s*)\S") def split_blocks(lines): """ Read the lines of a doc file and group them by blocks.""" blocks = [] block_type = None current_block = [] i = 0 def _move_to_next_non_empty_line(i): while i < len(lines) and len(lines[i]) == 0: i += 1 return i def _build_block(blocks, current_block, block_type): if len(current_block) > 0: while len(current_block[-1]) == 0: current_block = current_block[:-1] blocks.append(('\n'.join(current_block), block_type)) return blocks, [] # Ignore everything before the main title (copyright header) while _re_title.search(lines[i]) is None: i += 1 while i < len(lines): line = lines[i] if _re_title.search(line) is not None: blocks, current_block = _build_block(blocks, current_block, "prose") blocks.append((line, "title")) i += 1 i = _move_to_next_non_empty_line(i) elif _re_block.search(line) is not None: blocks, current_block = _build_block(blocks, current_block, "prose") block_type = _re_block.search(line).groups()[0] if _re_block_lang.search(line): block_type += " " + _re_block_lang.search(line).groups()[0] i += 1 i = _move_to_next_non_empty_line(i) indent = _re_indent.search(lines[i]).groups()[0] if len(indent) > 0: while i < len(lines) and (lines[i].startswith(indent) or len(lines[i]) == 0): current_block.append(lines[i]) i += 1 blocks, current_block = _build_block(blocks, current_block, block_type) elif _re_anchor_section.search(line): blocks, current_block = _build_block(blocks, current_block, "prose") blocks.append((line, "anchor")) i += 1 i = _move_to_next_non_empty_line(i) else: current_block.append(line) i += 1 blocks, current_block = _build_block(blocks, current_block, "prose") return blocks ################################### # Text formatting and cleaning # ################################### def process_titles(lines): """ Converts rst titles to markdown titles.""" title_chars = """= - ` : ' " ~ ^ _ * + # < >""".split(" ") title_levels = {} new_lines = [] for line in lines: if len(new_lines) > 0 and len(line) >= len(new_lines[-1]) and len(set(line)) == 1 and line[0] in title_chars and line != "::": char = line[0] level = title_levels.get(char, len(title_levels) + 1) if level not in title_levels: title_levels[char] = level new_lines[-1] = f"{'#' * level} 
{new_lines[-1]}" else: new_lines.append(line) return new_lines # Re pattern to catch things inside ` ` in :obj:`thing`. _re_obj = re.compile(r":obj:`([^`]+)`") # Re pattern to catch things inside ` ` in :math:`thing`. _re_math = re.compile(r":math:`([^`]+)`") # Re pattern to catch things between single backquotes. _re_single_backquotes = re.compile(r"(^|[^`])`([^`]+)`([^`]|$)") # Re pattern to catch things between stars. _re_stars = re.compile(r"\*([^\*]+)\*") # Re pattern to catch things between double backquotes. _re_double_backquotes = re.compile(r"``([^`]+)``") # Re pattern to catch things inside ` ` in :func/class/meth:`thing`. _re_func_class = re.compile(r":(?:func|class|meth):`([^`]+)`") def convert_rst_formatting(text): """ Convert rst syntax for formatting to markdown in text.""" # Remove :class:, :func: and :meth: markers. Simplify what's inside and put double backquotes # (to not be caught by the italic conversion). def _rep_func_class(match): name = match.groups()[0] splits = name.split('.') i = 0 while i < len(splits)-1 and not splits[i][0].isupper(): i += 1 return f"``{'.'.join(splits[i:])}``" text = _re_func_class.sub(_rep_func_class, text) # Remove :obj: markers. What's after is in a single backquotes so we put in double backquotes # (to not be caught by the italic conversion). text = _re_obj.sub(r"``\1``", text) # Remove :math: markers. text = _re_math.sub(r"$\1$", text) # Convert content in stars to bold text = _re_stars.sub(r'**\1**', text) # Convert content in single backquotes to italic. text = _re_single_backquotes.sub(r'\1*\2*\3', text) # Convert content in double backquotes to single backquotes. text = _re_double_backquotes.sub(r'`\1`', text) # Remove remaining :: text = re.sub(r"::\n", "", text) return text # Re pattern to catch description and url in links of the form `description <url>`_. _re_links = re.compile(r"`([^`]+\S)\s+</*([^/][^>`]*)>`_+") # Re pattern to catch reference in links of the form :doc:`reference`. _re_simple_doc = re.compile(r":doc:`([^`<]*)`") # Re pattern to catch description and reference in links of the form :doc:`description <reference>`. _re_doc_with_description = re.compile(r":doc:`([^`<]+\S)\s+</*([^/][^>`]*)>`") # Re pattern to catch reference in links of the form :ref:`reference`. _re_simple_ref = re.compile(r":ref:`([^`<]*)`") # Re pattern to catch description and reference in links of the form :ref:`description <reference>`. 
_re_ref_with_description = re.compile(r":ref:`([^`<]+\S)\s+<([^>]*)>`") def convert_rst_links(text): """ Convert the rst links in text to markdown.""" # Links of the form :doc:`page` text = _re_simple_doc.sub(r'[\1](' + DOC_BASE_URL + r'\1.html)', text) # Links of the form :doc:`text <page>` text = _re_doc_with_description.sub(r'[\1](' + DOC_BASE_URL + r'\2.html)', text) # Refs of the form :ref:`page` text = _re_simple_ref.sub(r'[\1](#\1)', text) # Refs of the form :ref:`text <page>` text = _re_ref_with_description.sub(r'[\1](#\2)', text) # Other links def _rep_links(match): text,url = match.groups() if not url.startswith('http'): url = DOC_BASE_URL + url return f"[{text}]({url})" text = _re_links.sub(_rep_links, text) return text ################################### # Notes, math and reference # ################################### def remove_indentation(text): """ Remove the indendation found in the first line in text.""" lines = text.split("\n") indent = _re_indent.search(lines[0]).groups()[0] new_lines = [line[len(indent):] for line in lines] return "\n".join(new_lines) # For now we just do **NOTE_TYPE:** text, maybe there is some clever html solution to have something nicer. def convert_to_note(text, note_type): """ Convert text to a note of note_type.""" text = remove_indentation(text) lines = text.split("\n") new_lines = [f"> **{note_type.upper()}:** {lines[0]}"] new_lines += [f"> {line}" for line in lines[1:]] return "\n".join(new_lines) def convert_math(text): """ Convert text to disaply mode LaTeX.""" text = remove_indentation(text) return f"$${text}$$" def convert_anchor(text): """ Convert text to an anchor that can be used in the notebook.""" anchor_name = _re_anchor_section.search(text).groups()[0] return f"<a id='{anchor_name}'></a>" ################################### # Images # ################################### _re_attr_rst = re.compile(r"^\s*:(\S+):\s*(\S.*)$") def convert_image(image_name, text, pref=None, origin_folder=None, dest_folder=None): """ Convert text to proper html code for image_name. Optionally copy image from origin_folder to dest_folder.""" # Copy the image if necessary if origin_folder is not None and dest_folder is not None: origin_file = os.path.join(origin_folder, image_name) dest_file = os.path.join(dest_folder, image_name) if not os.path.isfile(dest_file): os.makedirs(os.path.dirname(dest_file), exist_ok=True) shutil.copy(origin_file, dest_file) attrs = {'src': image_name if pref is None else os.path.join(pref, image_name)} for line in text.split("\n"): if _re_attr_rst.search(line) is not None: key, attr = _re_attr_rst.search(line).groups() attrs[key] = attr html = " ".join([f'{key}="{value}"' for key, value in attrs.items()]) return f"<img {html}/>" ################################### # Tables # ################################### # Matches lines with a pattern of a table new line in rst. _re_ignore_line_table = re.compile("^(\+[\-\s]+)+\+\s*$") # Matches lines with a pattern of a table new line in rst, with a first column empty. _re_ignore_line_table1 = re.compile("^\|\s+(\+[\-\s]+)+\+\s*$") # Matches lines with a pattern of a first table line in rst. 
_re_sep_line_table = re.compile("^(\+[=\s]+)+\+\s*$") def convert_table(text): """ Convert a table in text from rst to markdown.""" lines = text.split("\n") new_lines = [] for line in lines: if _re_ignore_line_table.search(line) is not None: continue if _re_ignore_line_table1.search(line) is not None: continue if _re_sep_line_table.search(line) is not None: line = line.replace('=', '-').replace('+', '|') new_lines.append(line) return "\n".join(new_lines) ################################### # Code cleaning # ################################### # Matches the pytorch code tag. _re_pytorch = re.compile(r"## PYTORCH CODE") # Matches the tensorflow code tag. _re_tensorflow = re.compile(r"## TENSORFLOW CODE") def split_frameworks(code): """ Split code between the two frameworks (if it has two versions) with PyTorch first.""" if _re_pytorch.search(code) is None or _re_tensorflow.search(code) is None: return (code,) lines = code.split("\n") is_pytorch_first = _re_pytorch.search(lines[0]) is not None re_split = _re_tensorflow if is_pytorch_first else _re_pytorch i = 1 while re_split.search(lines[i]) is None: i += 1 j = i-1 while len(lines[j]) == 0: j -= 1 return ("\n".join(lines[:j+1]), "\n".join(lines[i:])) if is_pytorch_first else ("\n".join(lines[i:]), "\n".join(lines[:j+1])) # Matches any doctest pattern. _re_doctest = re.compile(r"^(>>>|\.\.\.)") def parse_code_and_output(code): """ Parse code to remove indentation, doctest prompts and split between source and theoretical output.""" lines = code.split("\n") indent = _re_indent.search(lines[0]).groups()[0] has_doctest = False input_lines = [] output_lines = [] for line in lines: if len(line) > 0: line = line[len(indent):] if _re_doctest.search(line): has_doctest = True line = line[4:] input_lines.append(line) elif has_doctest: if len(line) > 0: output_lines.append(line) else: input_lines.append(line) return "\n".join(input_lines), "\n".join(output_lines) ################################### # All together! # ################################### def markdown_cell(md): """ Create a markdown cell with md inside.""" return nbformat.notebooknode.NotebookNode({'cell_type': 'markdown', 'source': md, 'metadata': {}}) def code_cell(code, output=None): """ Create a code cell with `code` and optionally, `output`.""" if output is None or len(output) == 0: outputs = [] else: outputs = [nbformat.notebooknode.NotebookNode({ 'data': {'text/plain': output}, 'execution_count': None, 'metadata': {}, 'output_type': 'execute_result' })] return nbformat.notebooknode.NotebookNode( {'cell_type': 'code', 'execution_count': None, 'source': code, 'metadata': {}, 'outputs': outputs}) def create_notebook(cells): """ Create a notebook with `cells`.""" return nbformat.notebooknode.NotebookNode( {'cells': cells, 'metadata': {}, 'nbformat': 4, 'nbformat_minor': 4, }) def rm_first_line(text): """ Remove the first line in `text`.""" return '\n'.join(text.split('\n')[1:]) # For the first cell of the notebook INSTALL_CODE = """# Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ def convert_rst_file_to_notebook( rst_file, notebook_fname, framework=None, img_prefix=None, origin_folder=None, dest_folder=None ): r""" Convert rst_file to a notebook named notebook_fname. Args: - rst_file (:obj:`str`): The doc file to convert (in rst format). 
- notebook_fname (:obj:`str`): The output notebook file name (will be replaced if it exists). - framework (:obj:`str`, `optional`): If provided, must be :obj:`"pt"` or :obj:`"tf"`. In this case, only the PyTorch (resp. TensorFlow) version of the code is kept. - img_prefix (:obj:`str`, `optional`): If provided, will be inserted at the beginning of each image filename (in the `pytorch` or `tensorflow` folder, we need to add ../ to each image file to find them). - origin_folder (:obj:`str`, `optional`): If provided in conjunction with :obj:`dest_folder`, images encountered will be copied from this folder to :obj:`dest_folder`. - dest_folder (:obj:`str`, `optional`): If provided in conjunction with :obj:`origin_folder`, images encountered will be copied from :obj:`origin_folder` to this folder. """ with open(rst_file, 'r') as f: content = f.read() lines = content.split("\n") lines = process_titles(lines) blocks = split_blocks(lines) cells = [code_cell(INSTALL_CODE)] for block,block_type in blocks: if block_type == 'title' or block_type == 'prose': block = convert_table(convert_rst_formatting(convert_rst_links(block))) cells.append(markdown_cell(block)) elif block_type == 'anchor': block = convert_anchor(block) cells.append(markdown_cell(block)) elif block_type.startswith('code-block'): codes = split_frameworks(block) if framework == 'pt' and len(codes) > 1: codes = (rm_first_line(codes[0]),) elif framework == 'tf' and len(codes) > 1: codes = (rm_first_line(codes[1]),) for code in codes: source,output = parse_code_and_output(code) if block_type.endswith('bash'): lines = source.split("\n") new_lines = [line if line.startswith("#") else f"! {line}" for line in lines] source = "\n".join(new_lines) cells.append(code_cell(source, output=output)) elif block_type.startswith("image"): image_name = block_type[len("image "):] block = convert_image( image_name, block, pref=img_prefix, origin_folder=origin_folder, dest_folder=dest_folder ) cells.append(markdown_cell(block)) elif block_type == "math": block = convert_math(block) cells.append(markdown_cell(block)) else: block = convert_rst_formatting(convert_rst_links(block)) block = convert_to_note(block, block_type) cells.append(markdown_cell(block)) notebook = create_notebook(cells) nbformat.write(notebook, notebook_fname, version=4) def convert_all_tutorials(path_to_docs=None, path_to_dest=None): """ Convert all tutorials into notebooks.""" path_to_docs = PATH_TO_DOCS if path_to_docs is None else path_to_docs path_to_dest = PATH_TO_DEST if path_to_dest is None else path_to_dest for folder in ["pytorch", "tensorflow"]: os.makedirs(os.path.join(path_to_dest, folder), exist_ok=True) for file in TUTORIAL_FILES: notebook_name = os.path.splitext(file)[0] + ".ipynb" doc_file = os.path.join(path_to_docs, file) notebook_file = os.path.join(path_to_dest, notebook_name) convert_rst_file_to_notebook(doc_file, notebook_file, origin_folder=path_to_docs, dest_folder=path_to_dest) for folder, framework in zip(["pytorch", "tensorflow"], ["pt", "tf"]): notebook_file = os.path.join(os.path.join(path_to_dest, folder), notebook_name) convert_rst_file_to_notebook(doc_file, notebook_file, framework=framework, img_prefix="..") if __name__ == "__main__": convert_all_tutorials()
notebooks/utils/convert_doc_to_notebooks.py/0
{ "file_path": "notebooks/utils/convert_doc_to_notebooks.py", "repo_id": "notebooks", "token_count": 7880 }
157
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Model merging

Training a model for each task can be costly, take up storage space, and the models aren't able to learn new information to improve their performance. Multitask learning can overcome some of these limitations by training a model to learn several tasks, but it is expensive to train and designing a dataset for it is challenging. *Model merging* offers a solution to these challenges by combining multiple pretrained models into one model, giving it the combined abilities of each individual model without any additional training.

PEFT provides several methods for merging models like a linear or SVD combination. This guide focuses on two methods that are more efficient for merging LoRA adapters by eliminating redundant parameters:

* [TIES](https://hf.co/papers/2306.01708) - TrIm, Elect, and Merge (TIES) is a three-step method for merging models. First, redundant parameters are trimmed, then conflicting signs are resolved into an aggregated vector, and finally the parameters whose signs are the same as the aggregate sign are averaged. This method takes into account that some values (redundant and sign disagreement) can degrade performance in the merged model.
* [DARE](https://hf.co/papers/2311.03099) - Drop And REscale is a method that can be used to prepare for other model merging methods like TIES. It works by randomly dropping parameters according to a drop rate and rescaling the remaining parameters. This helps to reduce the number of redundant and potentially interfering parameters among multiple models.

Models are merged with the [`~LoraModel.add_weighted_adapter`] method, and the specific model merging method is specified in the `combination_type` parameter.

## Merge method

With TIES and DARE, merging is enabled by setting `combination_type` and `density` to a value of the weights to keep from the individual models. For example, let's merge three finetuned [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) models: [tinyllama_lora_norobots](https://huggingface.co/smangrul/tinyllama_lora_norobots), [tinyllama_lora_sql](https://huggingface.co/smangrul/tinyllama_lora_sql), and [tinyllama_lora_adcopy](https://huggingface.co/smangrul/tinyllama_lora_adcopy).

<Tip warning={true}>

When you're attempting to merge fully trained models with TIES, you should be aware of any special tokens each model may have added to the embedding layer which are not a part of the original checkpoint's vocabulary. This may cause an issue because each model may have added a special token to the same embedding position. If this is the case, you should use the [`~transformers.PreTrainedModel.resize_token_embeddings`] method to avoid merging the special tokens at the same embedding index.
<br>

This shouldn't be an issue if you're only merging LoRA adapters trained from the same base model.

</Tip>

Load a base model, then use the [`~PeftModel.load_adapter`] method to load each adapter and assign it a name:

```py
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

config = PeftConfig.from_pretrained("smangrul/tinyllama_lora_norobots")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, load_in_4bit=True, device_map="auto").eval()
tokenizer = AutoTokenizer.from_pretrained("smangrul/tinyllama_lora_norobots")

model = PeftModel.from_pretrained(model, "smangrul/tinyllama_lora_norobots", adapter_name="norobots")
_ = model.load_adapter("smangrul/tinyllama_lora_sql", adapter_name="sql")
_ = model.load_adapter("smangrul/tinyllama_lora_adcopy", adapter_name="adcopy")
```

Set the adapters, weights, `adapter_name`, `combination_type`, and `density` with the [`~LoraModel.add_weighted_adapter`] method.

<hfoptions id="merge-method">
<hfoption id="TIES">

Weight values greater than `1.0` typically produce better results because they preserve the correct scale. A good default starting value for the weights is to set all values to `1.0`.

```py
adapters = ["norobots", "adcopy", "sql"]
weights = [2.0, 1.0, 1.0]
adapter_name = "merge"
density = 0.2
model.add_weighted_adapter(adapters, weights, adapter_name, combination_type="ties", density=density)
```

</hfoption>
<hfoption id="DARE">

```py
adapters = ["norobots", "adcopy", "sql"]
weights = [2.0, 0.3, 0.7]
adapter_name = "merge"
density = 0.2
model.add_weighted_adapter(adapters, weights, adapter_name, combination_type="dare_ties", density=density)
```

</hfoption>
</hfoptions>

Set the newly merged model as the active model with the [`~LoraModel.set_adapter`] method.

```py
model.set_adapter("merge")
```

Now you can use the merged model as an instruction-tuned model to write ad copy or SQL queries!

<hfoptions id="ties">
<hfoption id="instruct">

```py
messages = [
    {"role": "user", "content": "Write an essay about Generative AI."},
]
text = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
inputs = tokenizer(text, return_tensors="pt")
inputs = {k: v.to("cuda") for k, v in inputs.items()}
outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, top_p=0.95, temperature=0.2, repetition_penalty=1.2, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0]))
```

</hfoption>
<hfoption id="ad copy">

```py
messages = [
    {"role": "system", "content": "Create a text ad given the following product and description."},
    {"role": "user", "content": "Product: Sony PS5 PlayStation Console\nDescription: The PS5 console unleashes new gaming possibilities that you never anticipated."},
]
text = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
inputs = tokenizer(text, return_tensors="pt")
inputs = {k: v.to("cuda") for k, v in inputs.items()}
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, top_p=0.95, temperature=0.2, repetition_penalty=1.2, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0]))
```

</hfoption>
<hfoption id="SQL">

```py
text = """Table: 2-11365528-2
Columns: ['Team', 'Head Coach', 'President', 'Home Ground', 'Location']
Natural Query: Who is the Head Coach of the team whose President is Mario Volarevic?
SQL Query:"""

inputs = tokenizer(text, return_tensors="pt")
inputs = {k: v.to("cuda") for k, v in inputs.items()}
outputs = model.generate(**inputs, max_new_tokens=64, repetition_penalty=1.1, eos_token_id=tokenizer("</s>").input_ids[-1])
print(tokenizer.decode(outputs[0]))
```

</hfoption>
</hfoptions>
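If you want to reuse the merged adapter later, you can save it like any other adapter. The snippet below is a minimal sketch; the output directory name is arbitrary, and the commented load line assumes PEFT's default behavior of writing each named adapter into its own subfolder.

```py
# save all adapters attached to the model, including the new "merge" adapter
model.save_pretrained("tinyllama-merged-adapters")

# later, the merged adapter can be loaded back onto a freshly loaded base model, e.g.
# model = PeftModel.from_pretrained(base_model, "tinyllama-merged-adapters/merge", adapter_name="merge")
```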
peft/docs/source/developer_guides/model_merging.md/0
{ "file_path": "peft/docs/source/developer_guides/model_merging.md", "repo_id": "peft", "token_count": 2263 }
158
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch import torch.nn as nn from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, ) from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model class Shell(nn.Module): def __init__(self, weight, bias=None): super().__init__() self.weight = nn.Parameter(weight, requires_grad=False) if bias is not None: self.bias = nn.Parameter(bias, requires_grad=False) def unwrap_model(model, sub_module_name=".base_layer"): sub_module_name_list = [k.split(sub_module_name)[0] for k in model.state_dict().keys() if sub_module_name in k] sub_module_name_set = set(sub_module_name_list) for name in sub_module_name_set: # get the parent of the submodule name_parent = ".".join(name.split(".")[:-1]) name_child = name.split(".")[-1] sub_module = model.get_submodule(name_parent) print(sub_module) # replace with shell child = getattr(sub_module, name_child) weight = getattr(child.base_layer, "weight", None) bias = getattr(child.base_layer, "bias", None) shell = Shell(weight, bias) setattr(sub_module, name_child, shell) print("You have unwrapped the model. Use it on your own risk.") def print_model(model, name): print("=" * 10 + name + "=" * 10) print(model) for name, param in model.named_parameters(): if torch.is_tensor(param): if param.dtype in [torch.float32, torch.float16]: print( name, param.shape, param.device, param.dtype, param.requires_grad, param.mean().item(), param.max().item(), ) else: print(name, param.shape, param.device, param.dtype, param.requires_grad) def arg_parse(): parser = argparse.ArgumentParser(description="Quantize a model with LoftQ.") parser.add_argument( "--model_name_or_path", type=str, default=None, required=True, help="The name or path of the fp32/16 model.", ) parser.add_argument( "--token", type=str, default=None, help="The access token to download model from HuggingFace Hub.", ) parser.add_argument( "--bits", type=int, default=4, help="The quantized bits", ) parser.add_argument( "--iter", type=int, default=1, help="The alternating steps in LoftQ", ) parser.add_argument( "--rank", type=int, default=16, help="The rank of the LoRA adapter", ) parser.add_argument( "--save_dir", type=str, default="./model_zoo/loftq/", help="The rank of the LoRA adapter", ) args = parser.parse_args() return args def quantize_and_save(): args = arg_parse() # Download weights and configure LoRA tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True) if any(name in args.model_name_or_path.lower() for name in ["llama", "mistral", "falcon"]): model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True) task_type = TaskType.CAUSAL_LM target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "down_proj", "gate_proj"] elif any(name in args.model_name_or_path.lower() for name in ["bart", "t5"]): model = 
AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, token=args.token) task_type = TaskType.SEQ_2_SEQ_LM target_modules = ["q_proj", "k_proj", "v_proj", "fc1", "fc2", "out_proj"] elif any(name in args.model_name_or_path.lower() for name in ["deberta", "roberta", "bert"]): model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, token=args.token) task_type = TaskType.SEQ_CLS target_modules = ["query_proj", "key_proj", "value_proj", "dense"] # embeddings not supported by peft else: raise NotImplementedError("Other models not supported yet.") # Config of LoftQ loftq_config = LoftQConfig(loftq_bits=args.bits, loftq_iter=args.iter) lora_config = LoraConfig( task_type=task_type, inference_mode=True, r=args.rank, lora_alpha=16 if task_type is TaskType.CAUSAL_LM else args.rank, lora_dropout=0.1, target_modules=target_modules, init_lora_weights="loftq", loftq_config=loftq_config, ) # Obtain LoftQ model lora_model = get_peft_model(model, lora_config) base_model = lora_model.get_base_model() # Save LoftQ model model_name = args.model_name_or_path.split("/")[-1] + f"-{args.bits}bit" + f"-{args.rank}rank" base_model_dir = os.path.join(args.save_dir, model_name) lora_model_dir = os.path.join(args.save_dir, model_name, "loft_init") # save lora adapters first lora_model.base_model.peft_config[ "default" ].base_model_name_or_path = base_model_dir # This can be a local path or Hub model id lora_model.base_model.peft_config["default"].init_lora_weights = True # Don't apply LoftQ when loading again lora_model.save_pretrained(lora_model_dir) print_model(lora_model, "lora_model") # remove lora adapters and save the backbone unwrap_model(base_model) base_model.save_pretrained(base_model_dir) tokenizer.save_pretrained(base_model_dir) print_model(base_model, "base_model") return base_model_dir, lora_model_dir if __name__ == "__main__": base_dir, lora_dir = quantize_and_save() # example command: # python quantize_save_load.py \ # --model_name_or_path meta-llama/Llama-2-7b-hf \ # --token XXX \ # --bits 4 --iter 5 --rank 16 \ # --save_dir ./model_zoo/loftq/
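The comments at the end of the script show how it is invoked. As a rough follow-up sketch (quantization settings and paths are assumptions based on the defaults above, not part of the original file), the saved backbone can later be reloaded in 4-bit and the LoftQ-initialized adapters attached on top for fine-tuning:

```python
# Hypothetical example of re-loading the artifacts produced by this script (sketch).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

base_model_dir = "./model_zoo/loftq/Llama-2-7b-hf-4bit-16rank"  # naming scheme used above
lora_model_dir = f"{base_model_dir}/loft_init"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
base_model = AutoModelForCausalLM.from_pretrained(base_model_dir, quantization_config=bnb_config)

# attach the LoftQ-initialized LoRA adapters; is_trainable=True makes them trainable again
peft_model = PeftModel.from_pretrained(base_model, lora_model_dir, is_trainable=True)
peft_model.print_trainable_parameters()
```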
peft/examples/loftq_finetuning/quantize_save_load.py/0
{ "file_path": "peft/examples/loftq_finetuning/quantize_save_load.py", "repo_id": "peft", "token_count": 2835 }
159
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from .layer import AdaLoraLayer class SVDQuantLinear(torch.nn.Module, AdaLoraLayer): def __init__( self, base_layer, adapter_name, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() AdaLoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def forward(self, x: torch.Tensor) -> torch.Tensor: result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] lora_E = self.lora_E[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] ranknum = self.ranknum[active_adapter] + 1e-5 requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype if x.dtype != torch.float32: x = x.float() output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum # TODO: here, the dtype conversion is applied on the *whole expression*, # not the intermediate result, unlike for SVDLinear8bitLT and # SVDLinear4bit, is that correct? if requires_conversion: output = output.to(expected_dtype) result += output return result def __repr__(self) -> str: rep = super().__repr__() return "adalora." + rep
peft/src/peft/tuners/adalora/gptq.py/0
{ "file_path": "peft/src/peft/tuners/adalora/gptq.py", "repo_id": "peft", "token_count": 1173 }
160
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Any, Optional, Union from torch import nn from tqdm import tqdm from peft.tuners import adalora, loha, lokr, lora, oft from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, PeftType, _get_submodules, get_auto_gptq_quant_linear, ) # Collection of constants used for all tuners COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT) PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix] Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig] Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer) class MixedModel(BaseTuner): """ A class that allows to mix different types of adapters in a single model. Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. Args: model (:obj:`nn.Module`): The model to be tuned. config (:obj:`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (:obj:`str`): The name of the first adapter. """ def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None: super().__init__(model, config, adapter_name) def _check_new_adapter_config(self, config: Configs) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ if not isinstance(config, Configs.__args__): raise ValueError( f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}." ) biases = (getattr(config, "bias", None) for config in self.peft_config) biases = [bias for bias in biases if bias not in (None, "none")] if len(biases) > 1: raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." 
) @staticmethod def _check_target_module_exists(config: Configs, key: str): return check_target_module_exists(config, key) def _create_and_replace( self, config: Configs, *args: Any, **kwargs: Any, ) -> None: if isinstance(config, adalora.AdaLoraConfig): adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, lora.LoraConfig): lora.LoraModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, loha.LoHaConfig): loha.LoHaModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, lokr.LoKrConfig): lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, oft.OFTConfig): oft.OFTModel._create_and_replace(self, config, *args, **kwargs) else: raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") def _replace_module(self, parent, child_name, new_module, child) -> None: setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.get_base_layer() elif hasattr(child, "quant_linear_module"): # TODO maybe not necessary to have special treatment? child = child.quant_linear_module if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if any(prefix in name for prefix in PREFIXES): module.to(child.weight.device) if "ranknum" in name: module.to(child.weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if not any(prefix in n for prefix in PREFIXES): p.requires_grad = False for active_adapter in self.active_adapters: bias = getattr(self.peft_config[active_adapter], "bias", "none") if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "lora_only": # TODO: check if this is needed for other supported types for m in model.modules(): if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise ValueError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(config, adapter_name, target, **kwargs): gptq_quantization_config = kwargs.get("gptq_quantization_config", None) AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None): raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).") loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) if loaded_in_8bit or loaded_in_4bit: raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).") if isinstance(config, adalora.AdaLoraConfig): new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, lora.LoraConfig): new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, loha.LoHaConfig): new_module = 
loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, lokr.LoKrConfig): new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, oft.OFTConfig): new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs) else: raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self): self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): for active_adapter in self.active_adapters: val = getattr(self.peft_config[active_adapter], "bias", "none") if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: for module in self.model.modules(): if isinstance(module, Layers): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge layers when the model is gptq quantized") def merge_recursively(module): # helper function to recursively merge the base_layer of the target path = [] layer = module while hasattr(layer, "base_layer"): path.append(layer) layer = layer.base_layer for layer_before, layer_after in zip(path[:-1], path[1:]): layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names) layer_before.base_layer = layer_after.base_layer module.merge(safe_merge=safe_merge, adapter_names=adapter_names) key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: merge_recursively(target) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` new_module = target.modules_to_save[target.active_adapter] if hasattr(new_module, "base_layer"): # check if the module is itself a tuner layer if merge: 
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) new_module = new_module.get_base_layer() setattr(parent, target_name, new_module) return self.model def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).") def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Deletes an existing adapter. Args: adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete. """ if isinstance(adapter_name, str): adapter_names = [adapter_name] else: adapter_names = adapter_name mismatched = set(adapter_names) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) for adapter_name in adapter_names: del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, BaseTunerLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> nn.Module: r""" This method merges the layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def generate(self, *args: Any, **kwargs: Any): return self.model.generate(*args, **kwargs)
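Since `MixedModel` is normally reached through `get_peft_model(..., mixed=True)` rather than instantiated directly, a small usage sketch may help. The base model, target modules, and hyperparameters below are illustrative assumptions, not values taken from this module.

```python
# Mixing two different adapter types on one model (illustrative sketch).
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")

lora_cfg = LoraConfig(task_type="CAUSAL_LM", r=8, target_modules=["q_proj", "v_proj"])
loha_cfg = LoHaConfig(task_type="CAUSAL_LM", r=8, target_modules=["q_proj", "v_proj"])

# mixed=True routes through PeftMixedModel, which wraps the MixedModel tuner above
model = get_peft_model(base, lora_cfg, adapter_name="lora", mixed=True)
model.add_adapter("loha", loha_cfg)

# activate both adapter types at the same time
model.set_adapter(["lora", "loha"])
```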
peft/src/peft/tuners/mixed/model.py/0
{ "file_path": "peft/src/peft/tuners/mixed/model.py", "repo_id": "peft", "token_count": 6619 }
161
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest def pytest_addoption(parser): parser.addoption("--regression", action="store_true", default=False, help="run regression tests") def pytest_configure(config): config.addinivalue_line("markers", "regression: mark regression tests") def pytest_collection_modifyitems(config, items): if config.getoption("--regression"): return skip_regression = pytest.mark.skip(reason="need --regression option to run regression tests") for item in items: if "regression" in item.keywords: item.add_marker(skip_regression)
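For reference, this is how a test opts in to the marker configured above; it is collected but skipped unless pytest is run with `--regression`. The test body is a placeholder sketch, not part of the original file.

```python
import pytest

@pytest.mark.regression
def test_lora_outputs_match_reference():
    # compare current model outputs against stored reference values here
    ...
```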
peft/tests/conftest.py/0
{ "file_path": "peft/tests/conftest.py", "repo_id": "peft", "token_count": 356 }
162
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import tempfile from unittest import TestCase import pytest import torch from parameterized import parameterized from torch.testing import assert_close from peft.mapping import get_peft_model from peft.peft_model import PeftModel from peft.tuners.multitask_prompt_tuning import MultitaskPromptTuningConfig, MultitaskPromptTuningInit from peft.utils.other import WEIGHTS_NAME, prepare_model_for_int8_training from peft.utils.save_and_load import get_peft_model_state_dict from tests.testing_common import PeftCommonTester def is_llama_available() -> bool: """Check if Llama is available in the transformers library (it's not in earlier versions).""" try: return importlib.util.find_spec("transformers.models.llama.modeling_llama") is not None except ModuleNotFoundError: return False if is_llama_available(): # We guard the import statement so that our unit tests will pass in CI environments # that don't have a transformers package with Llama. from transformers import LlamaConfig, LlamaForCausalLM class MultiTaskPromptTuningTester(TestCase, PeftCommonTester): """ Tests for the AdaptionPrompt model. Some of these tests were adapted from `test_peft_model.py` (which has been refactored since), but since we haven't checked in the test checkpoints for Llama into `hf-internal-testing`, we separate them for now. """ def setUp(self): """Check that llama is available in transformers package before running each test.""" if not is_llama_available(): self.skipTest("Llama not available in transformers. 
Skipping test.") @staticmethod def _create_test_llama_config(): """Create a test config for a small Llama model for testing.""" return LlamaConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, use_cache=False, ) @classmethod def _create_multitask_prompt_tuning_config(cls) -> MultitaskPromptTuningConfig: return MultitaskPromptTuningConfig( task_type="CAUSAL_LM", num_virtual_tokens=50, num_tasks=3, prompt_tuning_init_text=( "classify the following into either positive or negative, or entailment, neutral or contradiction:" ), ) def test_prepare_for_training(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) model = get_peft_model(model, self._create_multitask_prompt_tuning_config()) model = model.to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) assert not dummy_output.requires_grad def test_prepare_for_int8_training(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) model = prepare_model_for_int8_training(model) model = model.to(self.torch_device) for param in model.parameters(): assert not param.requires_grad model = get_peft_model(model, self._create_multitask_prompt_tuning_config()) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) assert dummy_output.requires_grad def test_save_pretrained(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) model = get_peft_model(model, self._create_multitask_prompt_tuning_config()) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). 
assert len(state_dict) == 3 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `pytorch_model.bin` is not present assert not os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) def test_save_pretrained_regression(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) model = get_peft_model(model, self._create_multitask_prompt_tuning_config()) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=False) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). assert len(state_dict) == 3 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present for regression assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `pytorch_model.bin` is not present assert not os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) def test_generate(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) model = get_peft_model(model, self._create_multitask_prompt_tuning_config()) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) task_ids = torch.LongTensor([1, 2]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids) # check if `generate` works if positional arguments are passed _ = model.generate(input_ids, attention_mask=attention_mask, task_ids=task_ids) def test_use_cache(self) -> None: """Test that MultiTaskPromptTuning works when Llama config use_cache=True.""" torch.manual_seed(0) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) task_ids = torch.LongTensor([1, 2]).to(self.torch_device) original = LlamaForCausalLM(self._create_test_llama_config()).eval() mpt = get_peft_model(original, self._create_multitask_prompt_tuning_config()) mpt = mpt.to(self.torch_device) expected = mpt.generate(input_ids=input_ids, max_length=8, task_ids=task_ids) # Set use_cache = True and generate output again. 
mpt.base_model.config.use_cache = True actual = mpt.generate(input_ids=input_ids, max_length=8, task_ids=task_ids) assert_close(expected, actual, rtol=0, atol=0) def test_bf16_inference(self) -> None: """Test that MultiTaskPromptTuning works when Llama using a half-precision model.""" input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) task_ids = torch.tensor([1, 2]).to(self.torch_device) original = LlamaForCausalLM.from_pretrained( "trl-internal-testing/tiny-random-LlamaForCausalLM", torch_dtype=torch.bfloat16 ) mpt = get_peft_model(original, self._create_multitask_prompt_tuning_config()) mpt = mpt.to(self.torch_device) _ = mpt.generate(input_ids=input_ids, task_ids=task_ids) def test_generate_text_with_random_init(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) config = self._create_multitask_prompt_tuning_config() config.prompt_tuning_init = MultitaskPromptTuningInit.RANDOM model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) task_ids = torch.LongTensor([0]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids) with pytest.raises(ValueError): # check if `generate` raises an error if task_ids are not passed _ = model.generate(input_ids, attention_mask=attention_mask) @parameterized.expand( [ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, MultitaskPromptTuningInit.ONLY_SOURCE_SHARED, ], ) def test_generate_text_with_other_init(self, prompt_tuning_init) -> None: with tempfile.TemporaryDirectory() as tmp_dirname: model = LlamaForCausalLM(self._create_test_llama_config()) model = get_peft_model(model, self._create_multitask_prompt_tuning_config()) model.save_pretrained(tmp_dirname, safe_serialization=False) # bc torch.load is used config = MultitaskPromptTuningConfig( task_type="CAUSAL_LM", num_virtual_tokens=50, num_tasks=1, prompt_tuning_init_text=( "classify the following into either positive or negative, or entailment, neutral or contradiction:" ), prompt_tuning_init=prompt_tuning_init, prompt_tuning_init_state_dict_path=os.path.join(tmp_dirname, WEIGHTS_NAME), ) model = LlamaForCausalLM(self._create_test_llama_config()) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) task_ids = torch.LongTensor([0]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids) with pytest.raises(ValueError): # check if `generate` raises an error if task_ids are not passed _ = model.generate(input_ids, attention_mask=attention_mask)
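
# Illustrative usage sketch, not a test and never invoked by the suite: it simply mirrors the
# tiny Llama config and the generate() calls exercised in the tests above. The helper name is
# hypothetical; task_ids selects one of the three learned task prompts per sequence.
def _example_multitask_prompt_tuning_usage():
    base = LlamaForCausalLM(
        LlamaConfig(
            vocab_size=16,
            hidden_size=8,
            intermediate_size=8,
            num_hidden_layers=8,
            num_attention_heads=4,
            use_cache=False,
        )
    )
    config = MultitaskPromptTuningConfig(
        task_type="CAUSAL_LM",
        num_virtual_tokens=50,
        num_tasks=3,
        prompt_tuning_init_text=(
            "classify the following into either positive or negative, or entailment, neutral or contradiction:"
        ),
    )
    model = get_peft_model(base, config)

    input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]])
    attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]])
    task_ids = torch.LongTensor([1, 2])  # one task id per sequence in the batch
    return model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids)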
peft/tests/test_multitask_prompt_tuning.py/0
{ "file_path": "peft/tests/test_multitask_prompt_tuning.py", "repo_id": "peft", "token_count": 5662 }
163
#!/usr/bin/env python3 """ Model Benchmark Script An inference and train step benchmark script for timm models. Hacked together by Ross Wightman (https://github.com/rwightman) """ import argparse import csv import json import logging import time from collections import OrderedDict from contextlib import suppress from functools import partial import torch import torch.nn as nn import torch.nn.parallel from timm.data import resolve_data_config from timm.layers import set_fast_norm from timm.models import create_model, is_model, list_models from timm.optim import create_optimizer_v2 from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\ reparameterize_model has_apex = False try: from apex import amp has_apex = True except ImportError: pass has_native_amp = False try: if getattr(torch.cuda.amp, 'autocast') is not None: has_native_amp = True except AttributeError: pass try: from deepspeed.profiling.flops_profiler import get_model_profile has_deepspeed_profiling = True except ImportError as e: has_deepspeed_profiling = False try: from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis has_fvcore_profiling = True except ImportError as e: FlopCountAnalysis = None has_fvcore_profiling = False try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True _logger = logging.getLogger('validate') parser = argparse.ArgumentParser(description='PyTorch Benchmark') # benchmark specific args parser.add_argument('--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark') parser.add_argument('--bench', default='both', type=str, help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'") parser.add_argument('--detail', action='store_true', default=False, help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') parser.add_argument('--no-retry', action='store_true', default=False, help='Do not decay batch size and retry on error.') parser.add_argument('--results-file', default='', type=str, help='Output csv file for validation results (summary)') parser.add_argument('--results-format', default='csv', type=str, help='Format for results file one of (csv, json) (default: csv).') parser.add_argument('--num-warm-iter', default=10, type=int, help='Number of warmup iterations (default: 10)') parser.add_argument('--num-bench-iter', default=40, type=int, help='Number of benchmark iterations (default: 40)') parser.add_argument('--device', default='cuda', type=str, help="device to run benchmark on") # common inference / train args parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', help='model architecture (default: resnet50)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') parser.add_argument('--use-train-size', action='store_true', default=False, help='Run inference at train size, not test-input-size if it exists.') parser.add_argument('--num-classes', type=int, default=None, help='Number classes in dataset') parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages') parser.add_argument('--amp', action='store_true', default=False, help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.') parser.add_argument('--precision', default='float32', type=str, help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)') parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) # codegen (model compilation) options scripting_group = parser.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help="Enable compilation w/ specified backend (default: inductor).") scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help="Enable AOT Autograd optimization.") # train optimizer parameters parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay (default: 0.0001)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. 
One of ("norm", "value", "agc")') # model regularization / loss params that impact model or loss fn parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') def timestamp(sync=False): return time.perf_counter() def cuda_timestamp(sync=False, device=None): if sync: torch.cuda.synchronize(device=device) return time.perf_counter() def count_params(model: nn.Module): return sum([m.numel() for m in model.parameters()]) def resolve_precision(precision: str): assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32') amp_dtype = None # amp disabled model_dtype = torch.float32 data_dtype = torch.float32 if precision == 'amp': amp_dtype = torch.float16 elif precision == 'amp_bfloat16': amp_dtype = torch.bfloat16 elif precision == 'float16': model_dtype = torch.float16 data_dtype = torch.float16 elif precision == 'bfloat16': model_dtype = torch.bfloat16 data_dtype = torch.bfloat16 return amp_dtype, model_dtype, data_dtype def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): _, macs, _ = get_model_profile( model=model, input_shape=(batch_size,) + input_size, # input shape/resolution print_profile=detailed, # prints the model graph with the measured profile attached to each module detailed=detailed, # print the detailed profile warm_up=10, # the number of warm-ups before measuring the time of each module as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k) output_file=None, # path to the output file. If None, the profiler prints to stdout. 
ignore_modules=None) # the list of modules to ignore in the profiling return macs, 0 # no activation count in DS def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): if force_cpu: model = model.to('cpu') device, dtype = next(model.parameters()).device, next(model.parameters()).dtype example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) fca = FlopCountAnalysis(model, example_input) aca = ActivationCountAnalysis(model, example_input) if detailed: fcs = flop_count_str(fca) print(fcs) return fca.total(), aca.total() class BenchmarkRunner: def __init__( self, model_name, detail=False, device='cuda', torchscript=False, torchcompile=None, aot_autograd=False, reparam=False, precision='float32', fuser='', num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs ): self.model_name = model_name self.detail = detail self.device = device self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision) self.channels_last = kwargs.pop('channels_last', False) if self.amp_dtype is not None: self.amp_autocast = partial(torch.cuda.amp.autocast, dtype=self.amp_dtype) else: self.amp_autocast = suppress if fuser: set_jit_fuser(fuser) self.model = create_model( model_name, num_classes=kwargs.pop('num_classes', None), in_chans=3, global_pool=kwargs.pop('gp', 'fast'), scriptable=torchscript, drop_rate=kwargs.pop('drop', 0.), drop_path_rate=kwargs.pop('drop_path', None), drop_block_rate=kwargs.pop('drop_block', None), **kwargs.pop('model_kwargs', {}), ) if reparam: self.model = reparameterize_model(self.model) self.model.to( device=self.device, dtype=self.model_dtype, memory_format=torch.channels_last if self.channels_last else None, ) self.num_classes = self.model.num_classes self.param_count = count_params(self.model) _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size) self.input_size = data_config['input_size'] self.batch_size = kwargs.pop('batch_size', 256) self.compiled = False if torchscript: self.model = torch.jit.script(self.model) self.compiled = True elif torchcompile: assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.' 
torch._dynamo.reset() self.model = torch.compile(self.model, backend=torchcompile) self.compiled = True elif aot_autograd: assert has_functorch, "functorch is needed for --aot-autograd" self.model = memory_efficient_fusion(self.model) self.compiled = True self.example_inputs = None self.num_warm_iter = num_warm_iter self.num_bench_iter = num_bench_iter self.log_freq = num_bench_iter // 5 if 'cuda' in self.device: self.time_fn = partial(cuda_timestamp, device=self.device) else: self.time_fn = timestamp def _init_input(self): self.example_inputs = torch.randn( (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) if self.channels_last: self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) class InferenceBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.eval() def run(self): def _step(): t_step_start = self.time_fn() with self.amp_autocast(): output = self.model(self.example_inputs) t_step_end = self.time_fn(True) return t_step_end - t_step_start _logger.info( f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') with torch.no_grad(): self._init_input() for _ in range(self.num_warm_iter): _step() total_step = 0. num_samples = 0 t_run_start = self.time_fn() for i in range(self.num_bench_iter): delta_fwd = _step() total_step += delta_fwd num_samples += self.batch_size num_steps = i + 1 if num_steps % self.log_freq == 0: _logger.info( f"Infer [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_end = self.time_fn(True) t_run_elapsed = t_run_end - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) retries = 0 if self.compiled else 2 # skip profiling if model is scripted while retries: retries -= 1 try: if has_deepspeed_profiling: macs, _ = profile_deepspeed(self.model, self.input_size) results['gmacs'] = round(macs / 1e9, 2) elif has_fvcore_profiling: macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries) results['gmacs'] = round(macs / 1e9, 2) results['macts'] = round(activations / 1e6, 2) except RuntimeError as e: pass _logger.info( f"Inference benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") return results class TrainBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.train() self.loss = nn.CrossEntropyLoss().to(self.device) self.target_shape = tuple() self.optimizer = create_optimizer_v2( self.model, opt=kwargs.pop('opt', 'sgd'), lr=kwargs.pop('lr', 1e-4)) if kwargs.pop('grad_checkpointing', False): self.model.set_grad_checkpointing() def _gen_target(self, batch_size): return torch.empty( (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) def run(self): def _step(detail=False): self.optimizer.zero_grad() # can this be ignored? 
t_start = self.time_fn() t_fwd_end = t_start t_bwd_end = t_start with self.amp_autocast(): output = self.model(self.example_inputs) if isinstance(output, tuple): output = output[0] if detail: t_fwd_end = self.time_fn(True) target = self._gen_target(output.shape[0]) self.loss(output, target).backward() if detail: t_bwd_end = self.time_fn(True) self.optimizer.step() t_end = self.time_fn(True) if detail: delta_fwd = t_fwd_end - t_start delta_bwd = t_bwd_end - t_fwd_end delta_opt = t_end - t_bwd_end return delta_fwd, delta_bwd, delta_opt else: delta_step = t_end - t_start return delta_step _logger.info( f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') self._init_input() for _ in range(self.num_warm_iter): _step() t_run_start = self.time_fn() if self.detail: total_fwd = 0. total_bwd = 0. total_opt = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_fwd, delta_bwd, delta_opt = _step(True) num_samples += self.batch_size total_fwd += delta_fwd total_bwd += delta_bwd total_opt += delta_opt num_steps = (i + 1) if num_steps % self.log_freq == 0: total_step = total_fwd + total_bwd + total_opt _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," f" {1000 * total_opt / num_steps:0.3f} ms/step opt." ) total_step = total_fwd + total_bwd + total_opt t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), opt_time=round(1000 * total_opt / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) else: total_step = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_step = _step(False) num_samples += self.batch_size total_step += delta_step num_steps = (i + 1) if num_steps % self.log_freq == 0: _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Train benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") return results class ProfileRunner(BenchmarkRunner): def __init__(self, model_name, device='cuda', profiler='', **kwargs): super().__init__(model_name=model_name, device=device, **kwargs) if not profiler: if has_deepspeed_profiling: profiler = 'deepspeed' elif has_fvcore_profiling: profiler = 'fvcore' assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work." 
self.profiler = profiler self.model.eval() def run(self): _logger.info( f'Running profiler on {self.model_name} w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') macs = 0 activations = 0 if self.profiler == 'deepspeed': macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True) elif self.profiler == 'fvcore': macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True) results = dict( gmacs=round(macs / 1e9, 2), macts=round(activations / 1e6, 2), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Profile of {self.model_name} done. " f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.") return results def _try_run( model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False ): batch_size = initial_batch_size results = dict() error_str = 'Unknown' while batch_size: try: torch.cuda.empty_cache() bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) results = bench.run() return results except RuntimeError as e: error_str = str(e) _logger.error(f'"{error_str}" while running benchmark.') if not check_batch_size_retry(error_str): _logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.') break if no_batch_size_retry: break batch_size = decay_batch_step(batch_size) _logger.warning(f'Reducing batch size to {batch_size} for retry.') results['error'] = error_str return results def benchmark(args): if args.amp: _logger.warning("Overriding precision to 'amp' since --amp flag set.") args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype]) _logger.info(f'Benchmarking in {args.precision} precision. ' f'{"NHWC" if args.channels_last else "NCHW"} layout. 
' f'torchscript {"enabled" if args.torchscript else "disabled"}') bench_kwargs = vars(args).copy() bench_kwargs.pop('amp') model = bench_kwargs.pop('model') batch_size = bench_kwargs.pop('batch_size') bench_fns = (InferenceBenchmarkRunner,) prefixes = ('infer',) if args.bench == 'both': bench_fns = ( InferenceBenchmarkRunner, TrainBenchmarkRunner ) prefixes = ('infer', 'train') elif args.bench == 'train': bench_fns = TrainBenchmarkRunner, prefixes = 'train', elif args.bench.startswith('profile'): # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore if 'deepspeed' in args.bench: assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter" bench_kwargs['profiler'] = 'deepspeed' elif 'fvcore' in args.bench: assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter" bench_kwargs['profiler'] = 'fvcore' bench_fns = ProfileRunner, batch_size = 1 model_results = OrderedDict(model=model) for prefix, bench_fn in zip(prefixes, bench_fns): run_results = _try_run( model, bench_fn, bench_kwargs=bench_kwargs, initial_batch_size=batch_size, no_batch_size_retry=args.no_retry, ) if prefix and 'error' not in run_results: run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} model_results.update(run_results) if 'error' in run_results: break if 'error' not in model_results: param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) model_results.setdefault('param_count', param_count) model_results.pop('train_param_count', 0) return model_results def main(): setup_default_logging() args = parser.parse_args() model_cfgs = [] model_names = [] if args.fast_norm: set_fast_norm() if args.model_list: args.model = '' with open(args.model_list) as f: model_names = [line.rstrip() for line in f] model_cfgs = [(n, None) for n in model_names] elif args.model == 'all': # validate all models in a list of names with pretrained checkpoints args.pretrained = True model_names = list_models(pretrained=True, exclude_filters=['*in21k']) model_cfgs = [(n, None) for n in model_names] elif not is_model(args.model): # model name doesn't exist, try as wildcard filter model_names = list_models(args.model) model_cfgs = [(n, None) for n in model_names] if len(model_cfgs): _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) results = [] try: for m, _ in model_cfgs: if not m: continue args.model = m r = benchmark(args) if r: results.append(r) time.sleep(10) except KeyboardInterrupt as e: pass sort_key = 'infer_samples_per_sec' if 'train' in args.bench: sort_key = 'train_samples_per_sec' elif 'profile' in args.bench: sort_key = 'infer_gmacs' results = filter(lambda x: sort_key in x, results) results = sorted(results, key=lambda x: x[sort_key], reverse=True) else: results = benchmark(args) if args.results_file: write_results(args.results_file, results, format=args.results_format) # output results in JSON to stdout w/ delimiter for runner script print(f'--result\n{json.dumps(results, indent=4)}') def write_results(results_file, results, format='csv'): with open(results_file, mode='w') as cf: if format == 'json': json.dump(results, cf, indent=4) else: if not isinstance(results, (list, tuple)): results = [results] if not results: return dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == '__main__': main()
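
# Illustrative usage sketch, never invoked: the runner classes above can also be driven directly
# from Python instead of via the CLI. Assumptions: a CUDA device is available and timm can download
# weights; the model name and batch size are arbitrary examples, the helper name is hypothetical.
def _example_direct_inference_benchmark():
    runner = InferenceBenchmarkRunner(
        model_name='resnet50',
        batch_size=64,
        precision='amp',      # resolved to a float16 autocast context by resolve_precision()
        device='cuda',        # cuda_timestamp() is used for timing on CUDA devices
        num_warm_iter=10,
        num_bench_iter=40,
    )
    results = runner.run()
    print(f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.3f} ms/step")
    return results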
pytorch-image-models/benchmark.py/0
{ "file_path": "pytorch-image-models/benchmark.py", "repo_id": "pytorch-image-models", "token_count": 13272 }
164
# AdvProp (EfficientNet) **AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples. The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{xie2020adversarial, title={Adversarial Examples Improve Image Recognition}, author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le}, year={2020}, eprint={1911.09665}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: AdvProp Paper: Title: Adversarial Examples Improve Image Recognition URL: https://paperswithcode.com/paper/adversarial-examples-improve-image Models: - Name: tf_efficientnet_b0_ap In Collection: AdvProp Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21385973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b0_ap LR: 0.256 Epochs: 350 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.1% Top 5 Accuracy: 93.26% - Name: tf_efficientnet_b1_ap In Collection: AdvProp Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31515350 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b1_ap LR: 0.256 Epochs: 350 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.28% Top 5 Accuracy: 94.3% - Name: tf_efficientnet_b2_ap In Collection: AdvProp Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36800745 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training 
Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b2_ap LR: 0.256 Epochs: 350 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.3% Top 5 Accuracy: 95.03% - Name: tf_efficientnet_b3_ap In Collection: AdvProp Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49384538 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b3_ap LR: 0.256 Epochs: 350 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.82% Top 5 Accuracy: 95.62% - Name: tf_efficientnet_b4_ap In Collection: AdvProp Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77993585 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b4_ap LR: 0.256 Epochs: 350 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.26% Top 5 Accuracy: 96.39% - Name: tf_efficientnet_b5_ap In Collection: AdvProp Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122403150 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b5_ap LR: 0.256 Epochs: 350 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: 
https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.25% Top 5 Accuracy: 96.97% - Name: tf_efficientnet_b6_ap In Collection: AdvProp Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173237466 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b6_ap LR: 0.256 Epochs: 350 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.79% Top 5 Accuracy: 97.14% - Name: tf_efficientnet_b7_ap In Collection: AdvProp Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266850607 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b7_ap LR: 0.256 Epochs: 350 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.12% Top 5 Accuracy: 97.25% - Name: tf_efficientnet_b8_ap In Collection: AdvProp Metadata: FLOPs: 80962956270 Parameters: 87410000 File Size: 351412563 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b8_ap LR: 0.128 Epochs: 350 Crop Pct: '0.954' Momentum: 0.9 Batch Size: 2048 Image Size: '672' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.37% Top 5 Accuracy: 
97.3% -->
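
As an illustrative stand-in for the `code_snippets.md` include above (the image path below is a placeholder; the checkpoint name comes from the model index), one of the AdvProp checkpoints can be loaded and run through `timm`:

```python
import timm
import torch
from PIL import Image
from timm.data import resolve_data_config, create_transform

# Load a pretrained AdvProp checkpoint listed above and build its matching eval transform.
model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True).eval()
config = resolve_data_config({}, model=model)        # picks up the 224px / bicubic settings above
transform = create_transform(**config)

img = Image.open('some_image.jpg').convert('RGB')    # placeholder image path
with torch.no_grad():
    probs = model(transform(img).unsqueeze(0)).softmax(dim=-1)
print(probs.topk(5))
```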
pytorch-image-models/docs/models/.templates/models/advprop.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/advprop.md", "repo_id": "pytorch-image-models", "token_count": 5211 }
165
# (Gluon) ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/XieGDTH16, author = {Saining Xie and Ross B. Girshick and Piotr Doll{\'{a}}r and Zhuowen Tu and Kaiming He}, title = {Aggregated Residual Transformations for Deep Neural Networks}, journal = {CoRR}, volume = {abs/1611.05431}, year = {2016}, url = {http://arxiv.org/abs/1611.05431}, archivePrefix = {arXiv}, eprint = {1611.05431}, timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Gloun ResNeXt Paper: Title: Aggregated Residual Transformations for Deep Neural Networks URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep Models: - Name: gluon_resnext101_32x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177367414 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext101_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L193 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.33% Top 5 Accuracy: 94.91% - Name: gluon_resnext101_64x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 19954172928 Parameters: 83460000 File Size: 334737852 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext101_64x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L201 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.63% Top 5 Accuracy: 95.0% - Name: gluon_resnext50_32x4d In Collection: Gloun ResNeXt Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100441719 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: gluon_resnext50_32x4d Crop Pct: 
'0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L185 Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.35% Top 5 Accuracy: 94.42% -->
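
As an illustrative complement to the include above (not part of the original snippet set), the ported weights can also serve as a feature backbone through `timm`'s `features_only` interface; the input below is random placeholder data:

```python
import timm
import torch

# Build the Gluon ResNeXt-50 checkpoint from the index above as a multi-stage feature extractor.
backbone = timm.create_model('gluon_resnext50_32x4d', pretrained=True, features_only=True).eval()
with torch.no_grad():
    features = backbone(torch.randn(1, 3, 224, 224))  # 224px matches the Image Size entry above
for f in features:
    print(f.shape)  # one feature map per network stage
```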
pytorch-image-models/docs/models/.templates/models/gloun-resnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/gloun-resnext.md", "repo_id": "pytorch-image-models", "token_count": 1879 }
166
# NASNet **NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{zoph2018learning, title={Learning Transferable Architectures for Scalable Image Recognition}, author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le}, year={2018}, eprint={1707.07012}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: NASNet Paper: Title: Learning Transferable Architectures for Scalable Image Recognition URL: https://paperswithcode.com/paper/learning-transferable-architectures-for Models: - Name: nasnetalarge In Collection: NASNet Metadata: FLOPs: 30242402862 Parameters: 88750000 File Size: 356056626 Architecture: - Average Pooling - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - ReLU Tasks: - Image Classification Training Techniques: - Label Smoothing - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 50x Tesla K40 GPUs ID: nasnetalarge Dropout: 0.5 Crop Pct: '0.911' Momentum: 0.9 Image Size: '331' Interpolation: bicubic Label Smoothing: 0.1 RMSProp $\epsilon$: 1.0 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562 Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.63% Top 5 Accuracy: 96.05% -->
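
A small illustrative sketch (not from the original template): the NASNet entry above can be discovered and instantiated through `timm`'s model registry.

```python
import timm

# Find NASNet variants with pretrained weights and build the one described above.
print(timm.list_models('nasnet*', pretrained=True))   # e.g. ['nasnetalarge']; newer timm may append a tag
model = timm.create_model('nasnetalarge', pretrained=True).eval()
print(f'{sum(p.numel() for p in model.parameters()) / 1e6:.2f}M parameters')  # ~88.75M, as listed above
```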
pytorch-image-models/docs/models/.templates/models/nasnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/nasnet.md", "repo_id": "pytorch-image-models", "token_count": 730 }
167
# SK-ResNeXt **SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{li2019selective, title={Selective Kernel Networks}, author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, year={2019}, eprint={1903.06586}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: SKResNeXt Paper: Title: Selective Kernel Networks URL: https://paperswithcode.com/paper/selective-kernel-networks Models: - Name: skresnext50_32x4d In Collection: SKResNeXt Metadata: FLOPs: 5739845824 Parameters: 27480000 File Size: 110340975 Architecture: - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - Max Pooling - Residual Connection - Selective Kernel - Softmax Tasks: - Image Classification Training Data: - ImageNet Training Resources: 8x GPUs ID: skresnext50_32x4d LR: 0.1 Epochs: 100 Layers: 50 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.15% Top 5 Accuracy: 94.64% -->
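
As an illustrative sketch, the `skresnext50_32x4d` checkpoint above can also be used for pooled image embeddings by dropping the classifier head; the input tensor below is random placeholder data.

```python
import timm
import torch

# Remove the classification head (num_classes=0) to get pooled pre-logits features.
model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=0).eval()
with torch.no_grad():
    embedding = model(torch.randn(1, 3, 224, 224))  # 224px as in the entry above
print(embedding.shape)  # (1, 2048) pooled feature vector
```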
pytorch-image-models/docs/models/.templates/models/skresnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/skresnext.md", "repo_id": "pytorch-image-models", "token_count": 822 }
168
# Scripts A train, validation, inference, and checkpoint cleaning script included in the github root folder. Scripts are not currently packaged in the pip release. The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on [NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples). ## Training Script The variety of training args is large and not all combinations of options (or even options) have been fully tested. For the training dataset folder, specify the folder to the base that contains a `train` and `validation` folder. To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value: ```bash ./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4 ``` <Tip> It is recommended to use PyTorch 1.9+ w/ PyTorch native AMP and DDP instead of APEX AMP. --amp defaults to native AMP as of timm ver 0.4.3. --apex-amp will force use of APEX components if they are installed. </Tip> ## Validation / Inference Scripts Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs topk class ids in a csv. Specify the folder containing validation images, not the base as in training script. To validate with the model's pretrained weights (if they exist): ```bash python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained ``` To run inference from a checkpoint: ```bash python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar ``` ## Training Examples ### EfficientNet-B2 with RandAugment - 80.4 top-1, 95.1 top-5 These params are for dual Titan RTX cards with NVIDIA Apex installed: ```bash ./distributed_train.sh 2 /imagenet/ --model efficientnet_b2 -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .016 ``` ### MixNet-XL with RandAugment - 80.5 top-1, 94.9 top-5 This params are for dual Titan RTX cards with NVIDIA Apex installed: ```bash ./distributed_train.sh 2 /imagenet/ --model mixnet_xl -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .969 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.3 --amp --lr .016 --dist-bn reduce ``` ### SE-ResNeXt-26-D and SE-ResNeXt-26-T These hparams (or similar) work well for a wide range of ResNet architecture, generally a good idea to increase the epoch # as the model size increases... ie approx 180-200 for ResNe(X)t50, and 220+ for larger. Increase batch size and LR proportionally for better GPUs or with AMP enabled. 
These params were for 2 1080Ti cards: ```bash ./distributed_train.sh 2 /imagenet/ --model seresnext26t_32x4d --lr 0.1 --warmup-epochs 5 --epochs 160 --weight-decay 1e-4 --sched cosine --reprob 0.4 --remode pixel -b 112 ``` ### EfficientNet-B3 with RandAugment - 81.5 top-1, 95.7 top-5 The training of this model started with the same command line as EfficientNet-B2 w/ RA above. After almost three weeks of training the process crashed. The results weren't looking amazing so I resumed the training several times with tweaks to a few params (increase RE prob, decrease rand-aug, increase ema-decay). Nothing looked great. I ended up averaging the best checkpoints from all restarts. The result is mediocre at default res/crop but oddly performs much better with a full image test crop of 1.0. ### EfficientNet-B0 with RandAugment - 77.7 top-1, 95.3 top-5 [Michael Klachko](https://github.com/michaelklachko) achieved these results with the command line for B2 adapted for larger batch size, with the recommended B0 dropout rate of 0.2. ```bash ./distributed_train.sh 2 /imagenet/ --model efficientnet_b0 -b 384 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .048 ``` ### ResNet50 with JSD loss and RandAugment (clean + 2x RA augs) - 79.04 top-1, 94.39 top-5 Trained on two older 1080Ti cards, this took a while. Only slightly, non statistically better ImageNet validation result than my first good AugMix training of 78.99. However, these weights are more robust on tests with ImageNetV2, ImageNet-Sketch, etc. Unlike my first AugMix runs, I've enabled SplitBatchNorm, disabled random erasing on the clean split, and cranked up random erasing prob on the 2 augmented paths. ```bash ./distributed_train.sh 2 /imagenet -b 64 --model resnet50 --sched cosine --epochs 200 --lr 0.05 --amp --remode pixel --reprob 0.6 --aug-splits 3 --aa rand-m9-mstd0.5-inc1 --resplit --split-bn --jsd --dist-bn reduce ``` ### EfficientNet-ES (EdgeTPU-Small) with RandAugment - 78.066 top-1, 93.926 top-5 Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards. Model EMA was not used, final checkpoint is the average of 8 best checkpoints during training. ```bash ./distributed_train.sh 8 /imagenet --model efficientnet_es -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 ``` ### MobileNetV3-Large-100 - 75.766 top-1, 92,542 top-5 ```bash ./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9 ``` ### ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5 These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50 and likely 101. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPU using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The cmd line below are tuned for 8 GPU training. 
```bash ./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce ```
pytorch-image-models/hfdocs/source/training_script.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/training_script.mdx", "repo_id": "pytorch-image-models", "token_count": 2320 }
169
""" Quick n Simple Image Folder, Tarfile based DataSet Hacked together by / Copyright 2019, Ross Wightman """ import io import logging from typing import Optional import torch import torch.utils.data as data from PIL import Image from .readers import create_reader _logger = logging.getLogger(__name__) _ERROR_RETRY = 50 class ImageDataset(data.Dataset): def __init__( self, root, reader=None, split='train', class_map=None, load_bytes=False, input_img_mode='RGB', transform=None, target_transform=None, ): if reader is None or isinstance(reader, str): reader = create_reader( reader or '', root=root, split=split, class_map=class_map ) self.reader = reader self.load_bytes = load_bytes self.input_img_mode = input_img_mode self.transform = transform self.target_transform = target_transform self._consecutive_errors = 0 def __getitem__(self, index): img, target = self.reader[index] try: img = img.read() if self.load_bytes else Image.open(img) except Exception as e: _logger.warning(f'Skipped sample (index {index}, file {self.reader.filename(index)}). {str(e)}') self._consecutive_errors += 1 if self._consecutive_errors < _ERROR_RETRY: return self.__getitem__((index + 1) % len(self.reader)) else: raise e self._consecutive_errors = 0 if self.input_img_mode and not self.load_bytes: img = img.convert(self.input_img_mode) if self.transform is not None: img = self.transform(img) if target is None: target = -1 elif self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): return len(self.reader) def filename(self, index, basename=False, absolute=False): return self.reader.filename(index, basename, absolute) def filenames(self, basename=False, absolute=False): return self.reader.filenames(basename, absolute) class IterableImageDataset(data.IterableDataset): def __init__( self, root, reader=None, split='train', class_map=None, is_training=False, batch_size=1, num_samples=None, seed=42, repeats=0, download=False, input_img_mode='RGB', input_key=None, target_key=None, transform=None, target_transform=None, max_steps=None, ): assert reader is not None if isinstance(reader, str): self.reader = create_reader( reader, root=root, split=split, class_map=class_map, is_training=is_training, batch_size=batch_size, num_samples=num_samples, seed=seed, repeats=repeats, download=download, input_img_mode=input_img_mode, input_key=input_key, target_key=target_key, max_steps=max_steps, ) else: self.reader = reader self.transform = transform self.target_transform = target_transform self._consecutive_errors = 0 def __iter__(self): for img, target in self.reader: if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) yield img, target def __len__(self): if hasattr(self.reader, '__len__'): return len(self.reader) else: return 0 def set_epoch(self, count): # TFDS and WDS need external epoch count for deterministic cross process shuffle if hasattr(self.reader, 'set_epoch'): self.reader.set_epoch(count) def set_loader_cfg( self, num_workers: Optional[int] = None, ): # TFDS and WDS readers need # workers for correct # samples estimate before loader processes created if hasattr(self.reader, 'set_loader_cfg'): self.reader.set_loader_cfg(num_workers=num_workers) def filename(self, index, basename=False, absolute=False): assert False, 'Filename lookup by index not supported, use filenames().' 
    def filenames(self, basename=False, absolute=False):
        return self.reader.filenames(basename, absolute)


class AugMixDataset(torch.utils.data.Dataset):
    """Dataset wrapper to perform AugMix or other clean/augmentation mixes"""

    def __init__(self, dataset, num_splits=2):
        self.augmentation = None
        self.normalize = None
        self.dataset = dataset
        if self.dataset.transform is not None:
            self._set_transforms(self.dataset.transform)
        self.num_splits = num_splits

    def _set_transforms(self, x):
        assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
        self.dataset.transform = x[0]
        self.augmentation = x[1]
        self.normalize = x[2]

    @property
    def transform(self):
        return self.dataset.transform

    @transform.setter
    def transform(self, x):
        self._set_transforms(x)

    def _normalize(self, x):
        return x if self.normalize is None else self.normalize(x)

    def __getitem__(self, i):
        x, y = self.dataset[i]  # all splits share the same dataset base transform
        x_list = [self._normalize(x)]  # first split only normalizes (this is the 'clean' split)
        # run the full augmentation on the remaining splits
        for _ in range(self.num_splits - 1):
            x_list.append(self._normalize(self.augmentation(x)))
        return tuple(x_list), y

    def __len__(self):
        return len(self.dataset)
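A hedged usage sketch for the classes above: the folder path and the transform triple are illustrative assumptions, not part of the original file. `AugMixDataset` expects `dataset.transform` to hold a `(base, augmentation, normalize)` tuple, which it splits apart on construction.

```python
# Hypothetical usage of ImageDataset + AugMixDataset; path and transforms are placeholders.
from torchvision import transforms
from timm.data.dataset import ImageDataset, AugMixDataset

# folder laid out as root/class_name/img.jpg; the reader is inferred from the path
dataset = ImageDataset(root='/path/to/train')

# (base, augmentation, normalize) triple consumed by AugMixDataset._set_transforms
dataset.transform = (
    transforms.RandomResizedCrop(224),       # shared base transform
    transforms.RandomHorizontalFlip(p=1.0),  # applied to the non-clean splits only
    transforms.ToTensor(),                   # 'normalize' step applied to every split
)
augmix = AugMixDataset(dataset, num_splits=2)

(x_clean, x_aug), target = augmix[0]         # tuple of tensors + class index
```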
pytorch-image-models/timm/data/dataset.py/0
{ "file_path": "pytorch-image-models/timm/data/dataset.py", "repo_id": "pytorch-image-models", "token_count": 2918 }
170
""" A dataset reader that reads tarfile based datasets This reader can extract image samples from: * a single tar of image files * a folder of multiple tarfiles containing imagefiles * a tar of tars containing image files Labels are based on the combined folder and/or tar name structure. Hacked together by / Copyright 2020 Ross Wightman """ import logging import os import pickle import tarfile from glob import glob from typing import List, Tuple, Dict, Set, Optional, Union import numpy as np from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader _logger = logging.getLogger(__name__) CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' class TarState: def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None): self.tf: tarfile.TarFile = tf self.ti: tarfile.TarInfo = ti self.children: Dict[str, TarState] = {} # child states (tars within tars) def reset(self): self.tf = None def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions: Set[str]): sample_count = 0 for i, ti in enumerate(tf): if not ti.isfile(): continue dirname, basename = os.path.split(ti.path) name, ext = os.path.splitext(basename) ext = ext.lower() if ext == '.tar': with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: child_info = dict( name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.') parent_info['children'].append(child_info) elif ext in extensions: parent_info['samples'].append(ti) sample_count += 1 return sample_count def extract_tarinfos( root, class_name_to_idx: Optional[Dict] = None, cache_tarinfo: Optional[bool] = None, extensions: Optional[Union[List, Tuple, Set]] = None, sort: bool = True ): extensions = get_img_extensions(as_set=True) if not extensions else set(extensions) root_is_tar = False if os.path.isfile(root): assert os.path.splitext(root)[-1].lower() == '.tar' tar_filenames = [root] root, root_name = os.path.split(root) root_name = os.path.splitext(root_name)[0] root_is_tar = True else: root_name = root.strip(os.path.sep).split(os.path.sep)[-1] tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) num_tars = len(tar_filenames) tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) assert num_tars, f'No .tar files found at specified path ({root}).' 
_logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') info = dict(tartrees=[]) cache_path = '' if cache_tarinfo is None: cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB if cache_tarinfo: cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX cache_path = os.path.join(root, cache_filename) if os.path.exists(cache_path): _logger.info(f'Reading tar info from cache file {cache_path}.') with open(cache_path, 'rb') as pf: info = pickle.load(pf) assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" else: for i, fn in enumerate(tar_filenames): path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) num_children = len(parent_info["children"]) _logger.debug( f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') info['tartrees'].append(parent_info) if cache_path: _logger.info(f'Writing tar info to cache file {cache_path}.') with open(cache_path, 'wb') as pf: pickle.dump(info, pf) samples = [] labels = [] build_class_map = False if class_name_to_idx is None: build_class_map = True # Flatten tartree info into lists of samples and targets w/ targets based on label id via # class map arg or from unique paths. # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children # this covers my current use cases and keeps things a little easier to test for now. tarfiles = [] def _label_from_paths(*path, leaf_only=True): path = os.path.join(*path).strip(os.path.sep) return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') def _add_samples(info, fn): added = 0 for s in info['samples']: label = _label_from_paths(info['path'], os.path.dirname(s.path)) if not build_class_map and label not in class_name_to_idx: continue samples.append((s, fn, info['ti'])) labels.append(label) added += 1 return added _logger.info(f'Collecting samples and building tar states.') for parent_info in info['tartrees']: # if tartree has children, we assume all samples are at the child level tar_name = None if root_is_tar else parent_info['name'] tar_state = TarState() parent_added = 0 for child_info in parent_info['children']: child_added = _add_samples(child_info, fn=tar_name) if child_added: tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) parent_added += child_added parent_added += _add_samples(parent_info, fn=tar_name) if parent_added: tarfiles.append((tar_name, tar_state)) del info if build_class_map: # build class index sorted_labels = list(sorted(set(labels), key=natural_key)) class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} _logger.info(f'Mapping targets and sorting samples.') samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] if sort: samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) samples, targets = zip(*samples_and_targets) samples = np.array(samples) targets = np.array(targets) _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') return samples, targets, class_name_to_idx, tarfiles class ReaderImageInTar(Reader): """ Multi-tarfile dataset reader where there is one .tar file per class 
""" def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): super().__init__() class_name_to_idx = None if class_map: class_name_to_idx = load_class_map(class_map, root) self.root = root self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( self.root, class_name_to_idx=class_name_to_idx, cache_tarinfo=cache_tarinfo ) self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} if len(tarfiles) == 1 and tarfiles[0][0] is None: self.root_is_tar = True self.tar_state = tarfiles[0][1] else: self.root_is_tar = False self.tar_state = dict(tarfiles) self.cache_tarfiles = cache_tarfiles def __len__(self): return len(self.samples) def __getitem__(self, index): sample = self.samples[index] target = self.targets[index] sample_ti, parent_fn, child_ti = sample parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root tf = None cache_state = None if self.cache_tarfiles: cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] tf = cache_state.tf if tf is None: tf = tarfile.open(parent_abs) if self.cache_tarfiles: cache_state.tf = tf if child_ti is not None: ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None if ctf is None: ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) if self.cache_tarfiles: cache_state.children[child_ti.name].tf = ctf tf = ctf return tf.extractfile(sample_ti), target def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename
pytorch-image-models/timm/data/readers/reader_image_in_tar.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_in_tar.py", "repo_id": "pytorch-image-models", "token_count": 4050 }
171
""" BlurPool layer inspired by - Kornia's Max_BlurPool2d - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` Hacked together by Chris Ha and Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .padding import get_padding class BlurPool2d(nn.Module): r"""Creates a module that computes blurs and downsample a given feature map. See :cite:`zhang2019shiftinvar` for more details. Corresponds to the Downsample class, which does blurring and subsampling Args: channels = Number of input channels filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. stride (int): downsampling filter stride Returns: torch.Tensor: the transformed tensor. """ def __init__(self, channels, filt_size=3, stride=2) -> None: super(BlurPool2d, self).__init__() assert filt_size > 1 self.channels = channels self.filt_size = filt_size self.stride = stride self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) self.register_buffer('filt', blur_filter, persistent=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.pad(x, self.padding, 'reflect') return F.conv2d(x, self.filt, stride=self.stride, groups=self.channels)
pytorch-image-models/timm/layers/blur_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/blur_pool.py", "repo_id": "pytorch-image-models", "token_count": 625 }
172
""" 'Fast' Normalization Functions For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32. Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast) Hacked together by / Copyright 2022 Ross Wightman """ from typing import List, Optional import torch from torch.nn import functional as F try: from apex.normalization.fused_layer_norm import fused_layer_norm_affine has_apex = True except ImportError: has_apex = False try: from apex.normalization.fused_layer_norm import fused_rms_norm_affine, fused_rms_norm has_apex_rmsnorm = True except ImportError: has_apex_rmsnorm = False # fast (ie lower precision LN) can be disabled with this flag if issues crop up _USE_FAST_NORM = False # defaulting to False for now def is_fast_norm(): return _USE_FAST_NORM def set_fast_norm(enable=True): global _USE_FAST_NORM _USE_FAST_NORM = enable def fast_group_norm( x: torch.Tensor, num_groups: int, weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.group_norm(x, num_groups, weight, bias, eps) if torch.is_autocast_enabled(): # normally native AMP casts GN inputs to float32 # here we use the low precision autocast dtype # FIXME what to do re CPU autocast? dt = torch.get_autocast_gpu_dtype() x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.group_norm(x, num_groups, weight, bias, eps) def fast_layer_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.layer_norm(x, normalized_shape, weight, bias, eps) if has_apex: return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps) if torch.is_autocast_enabled(): # normally native AMP casts LN inputs to float32 # apex LN does not, this is behaving like Apex dt = torch.get_autocast_gpu_dtype() # FIXME what to do re CPU autocast? x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.layer_norm(x, normalized_shape, weight, bias, eps) def rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ): norm_ndim = len(normalized_shape) if torch.jit.is_scripting(): # ndim = len(x.shape) # dims = list(range(ndim - norm_ndim, ndim)) # this doesn't work on pytorch <= 1.13.x # NOTE -ve dims cause torchscript to crash in some cases, out of options to work around assert norm_ndim == 1 v = torch.var(x, dim=-1).unsqueeze(-1) # ts crashes with -ve dim + keepdim=True else: dims = tuple(range(-1, -norm_ndim - 1, -1)) v = torch.var(x, dim=dims, keepdim=True) x = x * torch.rsqrt(v + eps) if weight is not None: x = x * weight return x def fast_rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ) -> torch.Tensor: if torch.jit.is_scripting(): # this must be by itself, cannot merge with has_apex_rmsnorm return rms_norm(x, normalized_shape, weight, eps) if has_apex_rmsnorm: if weight is None: return fused_rms_norm(x, normalized_shape, eps) else: return fused_rms_norm_affine(x, weight, normalized_shape, eps) # fallback return rms_norm(x, normalized_shape, weight, eps)
pytorch-image-models/timm/layers/fast_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/fast_norm.py", "repo_id": "pytorch-image-models", "token_count": 1639 }
173
""" MLP module w/ dropout and configurable activation layer Hacked together by / Copyright 2020 Ross Wightman """ from functools import partial from torch import nn as nn from .grn import GlobalResponseNorm from .helpers import to_2tuple class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GluMlp(nn.Module): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, norm_layer=None, bias=True, drop=0., use_conv=False, gate_last=True, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.chunk_dim = 1 if use_conv else -1 self.gate_last = gate_last # use second half of width for gate self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 fc1_mid = self.fc1.bias.shape[0] // 2 nn.init.ones_(self.fc1.bias[fc1_mid:]) nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) def forward(self, x): x = self.fc1(x) x1, x2 = x.chunk(2, dim=self.chunk_dim) x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2 x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False) class SwiGLU(nn.Module): """ SwiGLU NOTE: GluMLP above can implement SwiGLU, but this impl has split fc1 and better matches some other common impl which makes mapping checkpoints simpler. 
""" def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0]) self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 nn.init.ones_(self.fc1_g.bias) nn.init.normal_(self.fc1_g.weight, std=1e-6) def forward(self, x): x_gate = self.fc1_g(x) x = self.fc1_x(x) x = self.act(x_gate) * x x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GatedMlp(nn.Module): """ MLP as used in gMLP """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, gate_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if gate_layer is not None: assert hidden_features % 2 == 0 self.gate = gate_layer(hidden_features) hidden_features = hidden_features // 2 # FIXME base reduction on gate property? else: self.gate = nn.Identity() self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.gate(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class ConvMlp(nn.Module): """ MLP using 1x1 convs that keeps spatial dims """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() self.act = act_layer() self.drop = nn.Dropout(drop) self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) def forward(self, x): x = self.fc1(x) x = self.norm(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) return x class GlobalResponseNormMlp(nn.Module): """ MLP w/ Global Response Norm (see grn.py), nn.Linear or 1x1 Conv2d """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv) self.fc2 = linear_layer(hidden_features, 
            out_features, bias=bias[1])
        self.drop2 = nn.Dropout(drop_probs[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.grn(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x
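An illustrative sketch of the MLP variants above applied to a token sequence; the feature sizes are arbitrary assumptions.

```python
# Mlp and SwiGLU both preserve the channel dimension of a (batch, tokens, channels) input.
import torch
import torch.nn as nn
from timm.layers.mlp import Mlp, SwiGLU

x = torch.randn(4, 196, 256)
mlp = Mlp(in_features=256, hidden_features=1024, act_layer=nn.GELU, drop=0.1)
swiglu = SwiGLU(in_features=256, hidden_features=512)
print(mlp(x).shape, swiglu(x).shape)  # both torch.Size([4, 196, 256])
```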
pytorch-image-models/timm/layers/mlp.py/0
{ "file_path": "pytorch-image-models/timm/layers/mlp.py", "repo_id": "pytorch-image-models", "token_count": 4251 }
174
""" Squeeze-and-Excitation Channel Attention An SE implementation originally based on PyTorch SE-Net impl. Has since evolved with additional functionality / configuration. Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 Also included is Effective Squeeze-Excitation (ESE). Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn from .create_act import create_act_layer from .helpers import make_divisible class SEModule(nn.Module): """ SE Module as defined in original SE-Nets with a few additions Additions include: * divisor can be specified to keep channels % div == 0 (default: 8) * reduction channels can be specified directly by arg (if rd_channels is set) * reduction channels can be specified by float rd_ratio (default: 1/16) * global max pooling can be added to the squeeze aggregation * customizable activation, normalization, and gate layer """ def __init__( self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): super(SEModule, self).__init__() self.add_maxpool = add_maxpool if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias) self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: # experimental codepath, may remove or change x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc1(x_se) x_se = self.act(self.bn(x_se)) x_se = self.fc2(x_se) return x * self.gate(x_se) SqueezeExcite = SEModule # alias class EffectiveSEModule(nn.Module): """ 'Effective Squeeze-Excitation From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 """ def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): super(EffectiveSEModule, self).__init__() self.add_maxpool = add_maxpool self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: # experimental codepath, may remove or change x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc(x_se) return x * self.gate(x_se) EffectiveSqueezeExcite = EffectiveSEModule # alias class SqueezeExciteCl(nn.Module): """ SE Module as defined in original SE-Nets with a few additions Additions include: * divisor can be specified to keep channels % div == 0 (default: 8) * reduction channels can be specified directly by arg (if rd_channels is set) * reduction channels can be specified by float rd_ratio (default: 1/16) * global max pooling can be added to the squeeze aggregation * customizable activation, normalization, and gate layer """ def __init__( self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, bias=True, act_layer=nn.ReLU, gate_layer='sigmoid'): super().__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
        self.fc1 = nn.Linear(channels, rd_channels, bias=bias)
        self.act = create_act_layer(act_layer, inplace=True)
        self.fc2 = nn.Linear(rd_channels, channels, bias=bias)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        x_se = x.mean((1, 2), keepdims=True)  # FIXME avg dim [1:n-1], don't assume 2D NHWC
        x_se = self.fc1(x_se)
        x_se = self.act(x_se)
        x_se = self.fc2(x_se)
        return x * self.gate(x_se)
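A hedged sketch of the channels-first SE modules above applied to an NCHW feature map; channel counts and spatial sizes are arbitrary.

```python
# SEModule reduces 128 channels by rd_ratio=1/16 to 8 reduction channels (after make_divisible).
import torch
from timm.layers.squeeze_excite import SEModule, EffectiveSEModule

x = torch.randn(2, 128, 14, 14)
se = SEModule(channels=128, rd_ratio=1. / 16)
ese = EffectiveSEModule(channels=128)
print(se(x).shape, ese(x).shape)  # both torch.Size([2, 128, 14, 14])
```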
pytorch-image-models/timm/layers/squeeze_excite.py/0
{ "file_path": "pytorch-image-models/timm/layers/squeeze_excite.py", "repo_id": "pytorch-image-models", "token_count": 1859 }
175
""" PyTorch Feature Extraction Helpers A collection of classes, functions, modules to help extract features from models and provide a common interface for describing them. The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict, defaultdict from copy import deepcopy from functools import partial from typing import Dict, List, Sequence, Tuple, Union import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.layers import Format __all__ = ['FeatureInfo', 'FeatureHooks', 'FeatureDictNet', 'FeatureListNet', 'FeatureHookNet'] class FeatureInfo: def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]): prev_reduction = 1 for i, fi in enumerate(feature_info): # sanity check the mandatory fields, there may be additional fields depending on the model assert 'num_chs' in fi and fi['num_chs'] > 0 assert 'reduction' in fi and fi['reduction'] >= prev_reduction prev_reduction = fi['reduction'] assert 'module' in fi fi.setdefault('index', i) self.out_indices = out_indices self.info = feature_info def from_other(self, out_indices: Tuple[int]): return FeatureInfo(deepcopy(self.info), out_indices) def get(self, key, idx=None): """ Get value by key at specified index (indices) if idx == None, returns value for key at each output index if idx is an integer, return value for that feature module index (ignoring output indices) if idx is a list/tupple, return value for each module index (ignoring output indices) """ if idx is None: return [self.info[i][key] for i in self.out_indices] if isinstance(idx, (tuple, list)): return [self.info[i][key] for i in idx] else: return self.info[idx][key] def get_dicts(self, keys=None, idx=None): """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None) """ if idx is None: if keys is None: return [self.info[i] for i in self.out_indices] else: return [{k: self.info[i][k] for k in keys} for i in self.out_indices] if isinstance(idx, (tuple, list)): return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx] else: return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys} def channels(self, idx=None): """ feature channels accessor """ return self.get('num_chs', idx) def reduction(self, idx=None): """ feature reduction (output stride) accessor """ return self.get('reduction', idx) def module_name(self, idx=None): """ feature module name accessor """ return self.get('module', idx) def __getitem__(self, item): return self.info[item] def __len__(self): return len(self.info) class FeatureHooks: """ Feature Hook Helper This module helps with the setup and extraction of hooks for extracting features from internal nodes in a model by node name. FIXME This works well in eager Python but needs redesign for torchscript. 
""" def __init__( self, hooks: Sequence[str], named_modules: dict, out_map: Sequence[Union[int, str]] = None, default_hook_type: str = 'forward', ): # setup feature hooks self._feature_outputs = defaultdict(OrderedDict) modules = {k: v for k, v in named_modules} for i, h in enumerate(hooks): hook_name = h['module'] m = modules[hook_name] hook_id = out_map[i] if out_map else hook_name hook_fn = partial(self._collect_output_hook, hook_id) hook_type = h.get('hook_type', default_hook_type) if hook_type == 'forward_pre': m.register_forward_pre_hook(hook_fn) elif hook_type == 'forward': m.register_forward_hook(hook_fn) else: assert False, "Unsupported hook type" def _collect_output_hook(self, hook_id, *args): x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre if isinstance(x, tuple): x = x[0] # unwrap input tuple self._feature_outputs[x.device][hook_id] = x def get_output(self, device) -> Dict[str, torch.tensor]: output = self._feature_outputs[device] self._feature_outputs[device] = OrderedDict() # clear after reading return output def _module_list(module, flatten_sequential=False): # a yield/iter would be better for this but wouldn't be compatible with torchscript ml = [] for name, module in module.named_children(): if flatten_sequential and isinstance(module, nn.Sequential): # first level of Sequential containers is flattened into containing model for child_name, child_module in module.named_children(): combined = [name, child_name] ml.append(('_'.join(combined), '.'.join(combined), child_module)) else: ml.append((name, name, module)) return ml def _get_feature_info(net, out_indices): feature_info = getattr(net, 'feature_info') if isinstance(feature_info, FeatureInfo): return feature_info.from_other(out_indices) elif isinstance(feature_info, (list, tuple)): return FeatureInfo(net.feature_info, out_indices) else: assert False, "Provided feature_info is not valid" def _get_return_layers(feature_info, out_map): module_names = feature_info.module_name() return_layers = {} for i, name in enumerate(module_names): return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] return return_layers class FeatureDictNet(nn.ModuleDict): """ Feature extractor with OrderedDict return Wrap a model and extract features as specified by the out indices, the network is partially re-built from contained modules. There is a strong assumption that the modules have been registered into the model in the same order as they are used. There should be no reuse of the same nn.Module more than once, including trivial modules like `self.relu = nn.ReLU`. Only submodules that are directly assigned to the model class (`model.feature1`) or at most one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. All Sequential containers that are directly assigned to the original model will have their modules assigned to this module with the name `model.features.1` being changed to `model.features_1` """ def __init__( self, model: nn.Module, out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4), out_map: Sequence[Union[int, str]] = None, output_fmt: str = 'NCHW', feature_concat: bool = False, flatten_sequential: bool = False, ): """ Args: model: Model from which to extract features. out_indices: Output indices of the model features to extract. out_map: Return id mapping for each output index, otherwise str(index) is used. feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting first element e.g. 
`x[0]` flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules) """ super(FeatureDictNet, self).__init__() self.feature_info = _get_feature_info(model, out_indices) self.output_fmt = Format(output_fmt) self.concat = feature_concat self.grad_checkpointing = False self.return_layers = {} return_layers = _get_return_layers(self.feature_info, out_map) modules = _module_list(model, flatten_sequential=flatten_sequential) remaining = set(return_layers.keys()) layers = OrderedDict() for new_name, old_name, module in modules: layers[new_name] = module if old_name in remaining: # return id has to be consistently str type for torchscript self.return_layers[new_name] = str(return_layers[old_name]) remaining.remove(old_name) if not remaining: break assert not remaining and len(self.return_layers) == len(return_layers), \ f'Return layers ({remaining}) are not present in model' self.update(layers) def set_grad_checkpointing(self, enable: bool = True): self.grad_checkpointing = enable def _collect(self, x) -> (Dict[str, torch.Tensor]): out = OrderedDict() for i, (name, module) in enumerate(self.items()): if self.grad_checkpointing and not torch.jit.is_scripting(): # Skipping checkpoint of first module because need a gradient at input # Skipping last because networks with in-place ops might fail w/ checkpointing enabled # NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues first_or_last_module = i == 0 or i == max(len(self) - 1, 0) x = module(x) if first_or_last_module else checkpoint(module, x) else: x = module(x) if name in self.return_layers: out_id = self.return_layers[name] if isinstance(x, (tuple, list)): # If model tap is a tuple or list, concat or select first element # FIXME this may need to be more generic / flexible for some nets out[out_id] = torch.cat(x, 1) if self.concat else x[0] else: out[out_id] = x return out def forward(self, x) -> Dict[str, torch.Tensor]: return self._collect(x) class FeatureListNet(FeatureDictNet): """ Feature extractor with list return A specialization of FeatureDictNet that always returns features as a list (values() of dict). """ def __init__( self, model: nn.Module, out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4), output_fmt: str = 'NCHW', feature_concat: bool = False, flatten_sequential: bool = False, ): """ Args: model: Model from which to extract features. out_indices: Output indices of the model features to extract. feature_concat: Concatenate intermediate features that are lists or tuples instead of selecting first element e.g. `x[0]` flatten_sequential: Flatten first two-levels of sequential modules in model (re-writes model modules) """ super().__init__( model, out_indices=out_indices, output_fmt=output_fmt, feature_concat=feature_concat, flatten_sequential=flatten_sequential, ) def forward(self, x) -> (List[torch.Tensor]): return list(self._collect(x).values()) class FeatureHookNet(nn.ModuleDict): """ FeatureHookNet Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. If `no_rewrite` is True, features are extracted via hooks without modifying the underlying network in any way. If `no_rewrite` is False, the model will be re-written as in the FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. FIXME this does not currently work with Torchscript, see FeatureHooks class """ def __init__( self, model: nn.Module, out_indices: Tuple[int, ...] 
= (0, 1, 2, 3, 4), out_map: Sequence[Union[int, str]] = None, return_dict: bool = False, output_fmt: str = 'NCHW', no_rewrite: bool = False, flatten_sequential: bool = False, default_hook_type: str = 'forward', ): """ Args: model: Model from which to extract features. out_indices: Output indices of the model features to extract. out_map: Return id mapping for each output index, otherwise str(index) is used. return_dict: Output features as a dict. no_rewrite: Enforce that model is not re-written if True, ie no modules are removed / changed. flatten_sequential arg must also be False if this is set True. flatten_sequential: Re-write modules by flattening first two levels of nn.Sequential containers. default_hook_type: The default hook type to use if not specified in model.feature_info. """ super().__init__() assert not torch.jit.is_scripting() self.feature_info = _get_feature_info(model, out_indices) self.return_dict = return_dict self.output_fmt = Format(output_fmt) self.grad_checkpointing = False layers = OrderedDict() hooks = [] if no_rewrite: assert not flatten_sequential if hasattr(model, 'reset_classifier'): # make sure classifier is removed? model.reset_classifier(0) layers['body'] = model hooks.extend(self.feature_info.get_dicts()) else: modules = _module_list(model, flatten_sequential=flatten_sequential) remaining = { f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type for f in self.feature_info.get_dicts() } for new_name, old_name, module in modules: layers[new_name] = module for fn, fm in module.named_modules(prefix=old_name): if fn in remaining: hooks.append(dict(module=fn, hook_type=remaining[fn])) del remaining[fn] if not remaining: break assert not remaining, f'Return layers ({remaining}) are not present in model' self.update(layers) self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map) def set_grad_checkpointing(self, enable: bool = True): self.grad_checkpointing = enable def forward(self, x): for i, (name, module) in enumerate(self.items()): if self.grad_checkpointing and not torch.jit.is_scripting(): # Skipping checkpoint of first module because need a gradient at input # Skipping last because networks with in-place ops might fail w/ checkpointing enabled # NOTE: first_or_last module could be static, but recalc in is_scripting guard to avoid jit issues first_or_last_module = i == 0 or i == max(len(self) - 1, 0) x = module(x) if first_or_last_module else checkpoint(module, x) else: x = module(x) out = self.hooks.get_output(x.device) return out if self.return_dict else list(out.values())
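These wrappers are normally reached indirectly through `timm.create_model(..., features_only=True)`; a hedged sketch follows, with the model name chosen only as an example.

```python
# features_only=True builds a FeatureListNet (or FeatureHookNet) around the backbone.
import torch
import timm

model = timm.create_model('resnet50', pretrained=False, features_only=True, out_indices=(1, 2, 3, 4))
x = torch.randn(1, 3, 224, 224)
features = model(x)  # list of feature maps, one per requested out_index

for chs, red, f in zip(model.feature_info.channels(), model.feature_info.reduction(), features):
    print(chs, red, tuple(f.shape))
```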
pytorch-image-models/timm/models/_features.py/0
{ "file_path": "pytorch-image-models/timm/models/_features.py", "repo_id": "pytorch-image-models", "token_count": 6555 }
176
""" Class-Attention in Image Transformers (CaiT) Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239 Original code and weights from https://github.com/facebookresearch/deit, copyright below Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] class ClassAttn(nn.Module): # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # with slight modifications to do CA fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=qkv_bias) self.k = nn.Linear(dim, dim, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) if self.fused_attn: x_cls = torch.nn.functional.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_cls = attn @ v x_cls = x_cls.transpose(1, 2).reshape(B, 1, C) x_cls = self.proj(x_cls) x_cls = self.proj_drop(x_cls) return x_cls class LayerScaleBlockClassAttn(nn.Module): # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # with slight modifications to add CA and LayerScale def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=ClassAttn, mlp_block=Mlp, init_values=1e-4, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x, x_cls): u = torch.cat((x_cls, x), dim=1) x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) return x_cls class TalkingHeadAttn(nn.Module): # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf) def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_l = nn.Linear(num_heads, num_heads) self.proj_w = nn.Linear(num_heads, num_heads) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = attn.softmax(dim=-1) attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScaleBlock(nn.Module): # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # with slight modifications to add layerScale def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=1e-4, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = attn_block( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = mlp_block( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class Cait(nn.Module): # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # with slight modifications to adapt to our cait models def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., block_layers=LayerScaleBlock, block_layers_token=LayerScaleBlockClassAttn, patch_layer=PatchEmbed, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, attn_block=TalkingHeadAttn, mlp_block=Mlp, init_values=1e-4, attn_block_token_only=ClassAttn, mlp_block_token_only=Mlp, depth_token_only=2, mlp_ratio_token_only=4.0 ): super().__init__() assert global_pool in ('', 'token', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim self.grad_checkpointing = False self.patch_embed = patch_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) dpr = [drop_path_rate for i in range(depth)] self.blocks = nn.Sequential(*[block_layers( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, attn_block=attn_block, mlp_block=mlp_block, init_values=init_values, ) for i in range(depth)]) self.blocks_token_only = nn.ModuleList([block_layers_token( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio_token_only, qkv_bias=qkv_bias, norm_layer=norm_layer, act_layer=act_layer, attn_block=attn_block_token_only, mlp_block=mlp_block_token_only, init_values=init_values, ) for _ in range(depth_token_only)]) self.norm = norm_layer(embed_dim) self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): def _matcher(name): if any([name.startswith(n) for n in ('cls_token', 'pos_embed', 'patch_embed')]): return 0 elif name.startswith('blocks.'): return int(name.split('.')[1]) + 1 elif name.startswith('blocks_token_only.'): # overlap token 
only blocks with last blocks to_offset = len(self.blocks) - len(self.blocks_token_only) + 1 return int(name.split('.')[1]) + to_offset elif name.startswith('norm.'): return len(self.blocks) else: return float('inf') return _matcher @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) x = x + self.pos_embed x = self.pos_drop(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for i, blk in enumerate(self.blocks_token_only): cls_tokens = blk(x, cls_tokens) x = torch.cat((cls_tokens, x), dim=1) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model=None): if 'model' in state_dict: state_dict = state_dict['model'] checkpoint_no_module = {} for k, v in state_dict.items(): checkpoint_no_module[k.replace('module.', '')] = v return checkpoint_no_module def _create_cait(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg( Cait, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'cait_xxs24_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', input_size=(3, 224, 224), ), 'cait_xxs24_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth', ), 'cait_xxs36_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', input_size=(3, 224, 224), ), 'cait_xxs36_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth', ), 'cait_xs24_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth', ), 'cait_s24_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', input_size=(3, 224, 224), ), 'cait_s24_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/S24_384.pth', ), 'cait_s36_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/S36_384.pth', ), 'cait_m36_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/M36_384.pth', ), 'cait_m48_448.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', input_size=(3, 448, 448), ), }) @register_model def cait_xxs24_224(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, 
depth=24, num_heads=4, init_values=1e-5) model = _create_cait('cait_xxs24_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xxs24_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5) model = _create_cait('cait_xxs24_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xxs36_224(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5) model = _create_cait('cait_xxs36_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xxs36_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5) model = _create_cait('cait_xxs36_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_xs24_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_values=1e-5) model = _create_cait('cait_xs24_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_s24_224(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5) model = _create_cait('cait_s24_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_s24_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5) model = _create_cait('cait_s24_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_s36_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_values=1e-6) model = _create_cait('cait_s36_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_m36_384(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_values=1e-6) model = _create_cait('cait_m36_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def cait_m48_448(pretrained=False, **kwargs) -> Cait: model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_values=1e-6) model = _create_cait('cait_m48_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model
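A short sketch using one of the CaiT entry points registered above; no pretrained weights are downloaded, and the class count override is just an example.

```python
# Forward pass through a randomly initialized cait_xxs24_224 with a 10-class head.
import torch
import timm

model = timm.create_model('cait_xxs24_224', pretrained=False, num_classes=10).eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 10])
```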
pytorch-image-models/timm/models/cait.py/0
{ "file_path": "pytorch-image-models/timm/models/cait.py", "repo_id": "pytorch-image-models", "token_count": 9133 }
177
""" EfficientViT (by MIT Song Han's Lab) Paper: `Efficientvit: Enhanced linear attention for high-resolution low-computation visual recognition` - https://arxiv.org/abs/2205.14756 Adapted from official impl at https://github.com/mit-han-lab/efficientvit """ __all__ = ['EfficientVit'] from typing import Optional from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.batchnorm import _BatchNorm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, create_conv2d, GELUTanh from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs def val2list(x: list or tuple or any, repeat_time=1): if isinstance(x, (list, tuple)): return list(x) return [x for _ in range(repeat_time)] def val2tuple(x: list or tuple or any, min_len: int = 1, idx_repeat: int = -1): # repeat elements if necessary x = val2list(x) if len(x) > 0: x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))] return tuple(x) def get_same_padding(kernel_size: int or tuple[int, ...]) -> int or tuple[int, ...]: if isinstance(kernel_size, tuple): return tuple([get_same_padding(ks) for ks in kernel_size]) else: assert kernel_size % 2 > 0, "kernel size should be odd number" return kernel_size // 2 class ConvNormAct(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size=3, stride=1, dilation=1, groups=1, bias=False, dropout=0., norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super(ConvNormAct, self).__init__() self.dropout = nn.Dropout(dropout, inplace=False) self.conv = create_conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=bias, ) self.norm = norm_layer(num_features=out_channels) if norm_layer else nn.Identity() self.act = act_layer(inplace=True) if act_layer is not None else nn.Identity() def forward(self, x): x = self.dropout(x) x = self.conv(x) x = self.norm(x) x = self.act(x) return x class DSConv(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size=3, stride=1, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, None), ): super(DSConv, self).__init__() use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) self.depth_conv = ConvNormAct( in_channels, in_channels, kernel_size, stride, groups=in_channels, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0], ) self.point_conv = ConvNormAct( in_channels, out_channels, 1, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1], ) def forward(self, x): x = self.depth_conv(x) x = self.point_conv(x) return x class ConvBlock(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size=3, stride=1, mid_channels=None, expand_ratio=1, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, None), ): super(ConvBlock, self).__init__() use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) mid_channels = mid_channels or round(in_channels * expand_ratio) self.conv1 = ConvNormAct( in_channels, mid_channels, kernel_size, stride, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0], ) self.conv2 = ConvNormAct( mid_channels, out_channels, kernel_size, 1, norm_layer=norm_layer[1], 
act_layer=act_layer[1], bias=use_bias[1], ) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class MBConv(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size=3, stride=1, mid_channels=None, expand_ratio=6, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, nn.ReLU6, None), ): super(MBConv, self).__init__() use_bias = val2tuple(use_bias, 3) norm_layer = val2tuple(norm_layer, 3) act_layer = val2tuple(act_layer, 3) mid_channels = mid_channels or round(in_channels * expand_ratio) self.inverted_conv = ConvNormAct( in_channels, mid_channels, 1, stride=1, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0], ) self.depth_conv = ConvNormAct( mid_channels, mid_channels, kernel_size, stride=stride, groups=mid_channels, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1], ) self.point_conv = ConvNormAct( mid_channels, out_channels, 1, norm_layer=norm_layer[2], act_layer=act_layer[2], bias=use_bias[2], ) def forward(self, x): x = self.inverted_conv(x) x = self.depth_conv(x) x = self.point_conv(x) return x class FusedMBConv(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size=3, stride=1, mid_channels=None, expand_ratio=6, groups=1, use_bias=False, norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d), act_layer=(nn.ReLU6, None), ): super(FusedMBConv, self).__init__() use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) mid_channels = mid_channels or round(in_channels * expand_ratio) self.spatial_conv = ConvNormAct( in_channels, mid_channels, kernel_size, stride=stride, groups=groups, norm_layer=norm_layer[0], act_layer=act_layer[0], bias=use_bias[0], ) self.point_conv = ConvNormAct( mid_channels, out_channels, 1, norm_layer=norm_layer[1], act_layer=act_layer[1], bias=use_bias[1], ) def forward(self, x): x = self.spatial_conv(x) x = self.point_conv(x) return x class LiteMLA(nn.Module): """Lightweight multi-scale linear attention""" def __init__( self, in_channels: int, out_channels: int, heads: int or None = None, heads_ratio: float = 1.0, dim=8, use_bias=False, norm_layer=(None, nn.BatchNorm2d), act_layer=(None, None), kernel_func=nn.ReLU, scales=(5,), eps=1e-5, ): super(LiteMLA, self).__init__() self.eps = eps heads = heads or int(in_channels // dim * heads_ratio) total_dim = heads * dim use_bias = val2tuple(use_bias, 2) norm_layer = val2tuple(norm_layer, 2) act_layer = val2tuple(act_layer, 2) self.dim = dim self.qkv = ConvNormAct( in_channels, 3 * total_dim, 1, bias=use_bias[0], norm_layer=norm_layer[0], act_layer=act_layer[0], ) self.aggreg = nn.ModuleList([ nn.Sequential( nn.Conv2d( 3 * total_dim, 3 * total_dim, scale, padding=get_same_padding(scale), groups=3 * total_dim, bias=use_bias[0], ), nn.Conv2d(3 * total_dim, 3 * total_dim, 1, groups=3 * heads, bias=use_bias[0]), ) for scale in scales ]) self.kernel_func = kernel_func(inplace=False) self.proj = ConvNormAct( total_dim * (1 + len(scales)), out_channels, 1, bias=use_bias[1], norm_layer=norm_layer[1], act_layer=act_layer[1], ) def _attn(self, q, k, v): dtype = v.dtype q, k, v = q.float(), k.float(), v.float() kv = k.transpose(-1, -2) @ v out = q @ kv out = out[..., :-1] / (out[..., -1:] + self.eps) return out.to(dtype) def forward(self, x): B, _, H, W = x.shape # generate multi-scale q, k, v qkv = self.qkv(x) multi_scale_qkv = [qkv] for op in self.aggreg: multi_scale_qkv.append(op(qkv)) multi_scale_qkv = torch.cat(multi_scale_qkv, dim=1) 
multi_scale_qkv = multi_scale_qkv.reshape(B, -1, 3 * self.dim, H * W).transpose(-1, -2) q, k, v = multi_scale_qkv.chunk(3, dim=-1) # lightweight global attention q = self.kernel_func(q) k = self.kernel_func(k) v = F.pad(v, (0, 1), mode="constant", value=1.) if not torch.jit.is_scripting(): with torch.autocast(device_type=v.device.type, enabled=False): out = self._attn(q, k, v) else: out = self._attn(q, k, v) # final projection out = out.transpose(-1, -2).reshape(B, -1, H, W) out = self.proj(out) return out register_notrace_module(LiteMLA) class EfficientVitBlock(nn.Module): def __init__( self, in_channels, heads_ratio=1.0, head_dim=32, expand_ratio=4, norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish, ): super(EfficientVitBlock, self).__init__() self.context_module = ResidualBlock( LiteMLA( in_channels=in_channels, out_channels=in_channels, heads_ratio=heads_ratio, dim=head_dim, norm_layer=(None, norm_layer), ), nn.Identity(), ) self.local_module = ResidualBlock( MBConv( in_channels=in_channels, out_channels=in_channels, expand_ratio=expand_ratio, use_bias=(True, True, False), norm_layer=(None, None, norm_layer), act_layer=(act_layer, act_layer, None), ), nn.Identity(), ) def forward(self, x): x = self.context_module(x) x = self.local_module(x) return x class ResidualBlock(nn.Module): def __init__( self, main: Optional[nn.Module], shortcut: Optional[nn.Module] = None, pre_norm: Optional[nn.Module] = None, ): super(ResidualBlock, self).__init__() self.pre_norm = pre_norm if pre_norm is not None else nn.Identity() self.main = main self.shortcut = shortcut def forward(self, x): res = self.main(self.pre_norm(x)) if self.shortcut is not None: res = res + self.shortcut(x) return res def build_local_block( in_channels: int, out_channels: int, stride: int, expand_ratio: float, norm_layer: str, act_layer: str, fewer_norm: bool = False, block_type: str = "default", ): assert block_type in ["default", "large", "fused"] if expand_ratio == 1: if block_type == "default": block = DSConv( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=(True, False) if fewer_norm else False, norm_layer=(None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, None), ) else: block = ConvBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, use_bias=(True, False) if fewer_norm else False, norm_layer=(None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, None), ) else: if block_type == "default": block = MBConv( in_channels=in_channels, out_channels=out_channels, stride=stride, expand_ratio=expand_ratio, use_bias=(True, True, False) if fewer_norm else False, norm_layer=(None, None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, act_layer, None), ) else: block = FusedMBConv( in_channels=in_channels, out_channels=out_channels, stride=stride, expand_ratio=expand_ratio, use_bias=(True, False) if fewer_norm else False, norm_layer=(None, norm_layer) if fewer_norm else norm_layer, act_layer=(act_layer, None), ) return block class Stem(nn.Sequential): def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer, block_type='default'): super().__init__() self.stride = 2 self.add_module( 'in_conv', ConvNormAct( in_chs, out_chs, kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer, ) ) stem_block = 0 for _ in range(depth): self.add_module(f'res{stem_block}', ResidualBlock( build_local_block( in_channels=out_chs, out_channels=out_chs, stride=1, expand_ratio=1, norm_layer=norm_layer, act_layer=act_layer, 
block_type=block_type, ), nn.Identity(), )) stem_block += 1 class EfficientVitStage(nn.Module): def __init__( self, in_chs, out_chs, depth, norm_layer, act_layer, expand_ratio, head_dim, vit_stage=False, ): super(EfficientVitStage, self).__init__() blocks = [ResidualBlock( build_local_block( in_channels=in_chs, out_channels=out_chs, stride=2, expand_ratio=expand_ratio, norm_layer=norm_layer, act_layer=act_layer, fewer_norm=vit_stage, ), None, )] in_chs = out_chs if vit_stage: # for stage 3, 4 for _ in range(depth): blocks.append( EfficientVitBlock( in_channels=in_chs, head_dim=head_dim, expand_ratio=expand_ratio, norm_layer=norm_layer, act_layer=act_layer, ) ) else: # for stage 1, 2 for i in range(1, depth): blocks.append(ResidualBlock( build_local_block( in_channels=in_chs, out_channels=out_chs, stride=1, expand_ratio=expand_ratio, norm_layer=norm_layer, act_layer=act_layer ), nn.Identity(), )) self.blocks = nn.Sequential(*blocks) def forward(self, x): return self.blocks(x) class EfficientVitLargeStage(nn.Module): def __init__( self, in_chs, out_chs, depth, norm_layer, act_layer, head_dim, vit_stage=False, fewer_norm=False, ): super(EfficientVitLargeStage, self).__init__() blocks = [ResidualBlock( build_local_block( in_channels=in_chs, out_channels=out_chs, stride=2, expand_ratio=24 if vit_stage else 16, norm_layer=norm_layer, act_layer=act_layer, fewer_norm=vit_stage or fewer_norm, block_type='default' if fewer_norm else 'fused', ), None, )] in_chs = out_chs if vit_stage: # for stage 4 for _ in range(depth): blocks.append( EfficientVitBlock( in_channels=in_chs, head_dim=head_dim, expand_ratio=6, norm_layer=norm_layer, act_layer=act_layer, ) ) else: # for stage 1, 2, 3 for i in range(depth): blocks.append(ResidualBlock( build_local_block( in_channels=in_chs, out_channels=out_chs, stride=1, expand_ratio=4, norm_layer=norm_layer, act_layer=act_layer, fewer_norm=fewer_norm, block_type='default' if fewer_norm else 'fused', ), nn.Identity(), )) self.blocks = nn.Sequential(*blocks) def forward(self, x): return self.blocks(x) class ClassifierHead(nn.Module): def __init__( self, in_channels, widths, n_classes=1000, dropout=0., norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish, global_pool='avg', norm_eps=1e-5, ): super(ClassifierHead, self).__init__() self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer) self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True, input_fmt='NCHW') self.classifier = nn.Sequential( nn.Linear(widths[0], widths[1], bias=False), nn.LayerNorm(widths[1], eps=norm_eps), act_layer(inplace=True) if act_layer is not None else nn.Identity(), nn.Dropout(dropout, inplace=False), nn.Linear(widths[1], n_classes, bias=True), ) def forward(self, x, pre_logits: bool = False): x = self.in_conv(x) x = self.global_pool(x) if pre_logits: return x x = self.classifier(x) return x class EfficientVit(nn.Module): def __init__( self, in_chans=3, widths=(), depths=(), head_dim=32, expand_ratio=4, norm_layer=nn.BatchNorm2d, act_layer=nn.Hardswish, global_pool='avg', head_widths=(), drop_rate=0.0, num_classes=1000, ): super(EfficientVit, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.num_classes = num_classes # input stem self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer) stride = self.stem.stride # stages self.feature_info = [] self.stages = nn.Sequential() in_channels = widths[0] for i, (w, d) in enumerate(zip(widths[1:], depths[1:])): self.stages.append(EfficientVitStage( 
in_channels, w, depth=d, norm_layer=norm_layer, act_layer=act_layer, expand_ratio=expand_ratio, head_dim=head_dim, vit_stage=i >= 2, )) stride *= 2 in_channels = w self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')] self.num_features = in_channels self.head_widths = head_widths self.head_dropout = drop_rate if num_classes > 0: self.head = ClassifierHead( self.num_features, self.head_widths, n_classes=num_classes, dropout=self.head_dropout, global_pool=self.global_pool, ) else: if self.global_pool == 'avg': self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: self.head = nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.classifier[-1] def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool if num_classes > 0: self.head = ClassifierHead( self.num_features, self.head_widths, n_classes=num_classes, dropout=self.head_dropout, global_pool=self.global_pool, ) else: if self.global_pool == 'avg': self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True) else: self.head = nn.Identity() def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class EfficientVitLarge(nn.Module): def __init__( self, in_chans=3, widths=(), depths=(), head_dim=32, norm_layer=nn.BatchNorm2d, act_layer=GELUTanh, global_pool='avg', head_widths=(), drop_rate=0.0, num_classes=1000, norm_eps=1e-7, ): super(EfficientVitLarge, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.num_classes = num_classes self.norm_eps = norm_eps norm_layer = partial(norm_layer, eps=self.norm_eps) # input stem self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer, block_type='large') stride = self.stem.stride # stages self.feature_info = [] self.stages = nn.Sequential() in_channels = widths[0] for i, (w, d) in enumerate(zip(widths[1:], depths[1:])): self.stages.append(EfficientVitLargeStage( in_channels, w, depth=d, norm_layer=norm_layer, act_layer=act_layer, head_dim=head_dim, vit_stage=i >= 3, fewer_norm=i >= 2, )) stride *= 2 in_channels = w self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')] self.num_features = in_channels self.head_widths = head_widths self.head_dropout = drop_rate if num_classes > 0: self.head = ClassifierHead( self.num_features, self.head_widths, n_classes=num_classes, dropout=self.head_dropout, global_pool=self.global_pool, act_layer=act_layer, norm_eps=self.norm_eps, ) else: if self.global_pool == 'avg': self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: self.head = nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', 
None), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.classifier[-1] def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool if num_classes > 0: self.head = ClassifierHead( self.num_features, self.head_widths, n_classes=num_classes, dropout=self.head_dropout, global_pool=self.global_pool, norm_eps=self.norm_eps ) else: if self.global_pool == 'avg': self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True) else: self.head = nn.Identity() def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.in_conv.conv', 'classifier': 'head.classifier.4', 'crop_pct': 0.95, 'input_size': (3, 224, 224), 'pool_size': (7, 7), **kwargs, } default_cfgs = generate_default_cfgs({ 'efficientvit_b0.r224_in1k': _cfg( hf_hub_id='timm/', ), 'efficientvit_b1.r224_in1k': _cfg( hf_hub_id='timm/', ), 'efficientvit_b1.r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, ), 'efficientvit_b1.r288_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, ), 'efficientvit_b2.r224_in1k': _cfg( hf_hub_id='timm/', ), 'efficientvit_b2.r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, ), 'efficientvit_b2.r288_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, ), 'efficientvit_b3.r224_in1k': _cfg( hf_hub_id='timm/', ), 'efficientvit_b3.r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, ), 'efficientvit_b3.r288_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, ), 'efficientvit_l1.r224_in1k': _cfg( hf_hub_id='timm/', crop_pct=1.0, ), 'efficientvit_l2.r224_in1k': _cfg( hf_hub_id='timm/', crop_pct=1.0, ), 'efficientvit_l2.r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, ), 'efficientvit_l2.r288_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, ), 'efficientvit_l2.r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'efficientvit_l3.r224_in1k': _cfg( hf_hub_id='timm/', crop_pct=1.0, ), 'efficientvit_l3.r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, ), 'efficientvit_l3.r320_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, ), 'efficientvit_l3.r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), # 'efficientvit_l0_sam.sam': _cfg( # # hf_hub_id='timm/', # input_size=(3, 512, 512), crop_pct=1.0, # num_classes=0, # ), # 'efficientvit_l1_sam.sam': _cfg( # # hf_hub_id='timm/', # input_size=(3, 512, 512), crop_pct=1.0, # num_classes=0, # ), # 'efficientvit_l2_sam.sam': _cfg( # # hf_hub_id='timm/',f # input_size=(3, 512, 512), 
crop_pct=1.0, # num_classes=0, # ), }) def _create_efficientvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( EfficientVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model def _create_efficientvit_large(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( EfficientVitLarge, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model @register_model def efficientvit_b0(pretrained=False, **kwargs): model_args = dict( widths=(8, 16, 32, 64, 128), depths=(1, 2, 2, 2, 2), head_dim=16, head_widths=(1024, 1280)) return _create_efficientvit('efficientvit_b0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_b1(pretrained=False, **kwargs): model_args = dict( widths=(16, 32, 64, 128, 256), depths=(1, 2, 3, 3, 4), head_dim=16, head_widths=(1536, 1600)) return _create_efficientvit('efficientvit_b1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_b2(pretrained=False, **kwargs): model_args = dict( widths=(24, 48, 96, 192, 384), depths=(1, 3, 4, 4, 6), head_dim=32, head_widths=(2304, 2560)) return _create_efficientvit('efficientvit_b2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_b3(pretrained=False, **kwargs): model_args = dict( widths=(32, 64, 128, 256, 512), depths=(1, 4, 6, 6, 9), head_dim=32, head_widths=(2304, 2560)) return _create_efficientvit('efficientvit_b3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_l1(pretrained=False, **kwargs): model_args = dict( widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, head_widths=(3072, 3200)) return _create_efficientvit_large('efficientvit_l1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_l2(pretrained=False, **kwargs): model_args = dict( widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(3072, 3200)) return _create_efficientvit_large('efficientvit_l2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_l3(pretrained=False, **kwargs): model_args = dict( widths=(64, 128, 256, 512, 1024), depths=(1, 2, 2, 8, 8), head_dim=32, head_widths=(6144, 6400)) return _create_efficientvit_large('efficientvit_l3', pretrained=pretrained, **dict(model_args, **kwargs)) # FIXME will wait for v2 SAM models which are pending # @register_model # def efficientvit_l0_sam(pretrained=False, **kwargs): # # only backbone for segment-anything-model weights # model_args = dict( # widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 4, 4), head_dim=32, num_classes=0, norm_eps=1e-6) # return _create_efficientvit_large('efficientvit_l0_sam', pretrained=pretrained, **dict(model_args, **kwargs)) # # # @register_model # def efficientvit_l1_sam(pretrained=False, **kwargs): # # only backbone for segment-anything-model weights # model_args = dict( # widths=(32, 64, 128, 256, 512), depths=(1, 1, 1, 6, 6), head_dim=32, num_classes=0, norm_eps=1e-6) # return _create_efficientvit_large('efficientvit_l1_sam', pretrained=pretrained, **dict(model_args, **kwargs)) # # # @register_model # def efficientvit_l2_sam(pretrained=False, **kwargs): # # only backbone for segment-anything-model weights # model_args = dict( # widths=(32, 64, 128, 256, 512), depths=(1, 2, 2, 8, 8), 
head_dim=32, num_classes=0, norm_eps=1e-6) # return _create_efficientvit_large('efficientvit_l2_sam', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/efficientvit_mit.py/0
{ "file_path": "pytorch-image-models/timm/models/efficientvit_mit.py", "repo_id": "pytorch-image-models", "token_count": 18668 }
178
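The `EfficientVit` models above register classification entry points (`efficientvit_b0`–`b3`, `efficientvit_l1`–`l3`) and record per-stage `feature_info`, so they can serve either as classifiers or as multi-scale backbones; the `LiteMLA` block they are built on replaces softmax attention with ReLU linear attention, normalizing `q @ (kᵀ v)` by an extra all-ones column padded onto `v`. Below is a minimal usage sketch, not part of the source file, assuming a `timm` release that already ships these registrations:

```python
# Hypothetical usage sketch; assumes a timm release that includes the
# efficientvit_* entry points defined in efficientvit_mit.py above.
import torch
import timm

model = timm.create_model('efficientvit_b0', pretrained=False)  # random init, no weight download
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)                       # (1, 1000) from the ClassifierHead

# features_only exposes the four stages recorded in feature_info.
backbone = timm.create_model('efficientvit_b0', pretrained=False, features_only=True)
with torch.no_grad():
    for f in backbone(x):
        print(f.shape)                      # 16/32/64/128 channels at strides 4/8/16/32
```

With `pretrained=True`, the `.r224_in1k` entries in `default_cfgs` would instead pull the corresponding Hub checkpoints.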
""" Pytorch Inception-Resnet-V2 implementation Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionResnetV2'] class Mixed_5b(nn.Module): def __init__(self, conv_block=None): super(Mixed_5b, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(192, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(192, 48, kernel_size=1, stride=1), conv_block(48, 64, kernel_size=5, stride=1, padding=2) ) self.branch2 = nn.Sequential( conv_block(192, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(192, 64, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block35, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 32, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 48, kernel_size=3, stride=1, padding=1), conv_block(48, 64, kernel_size=3, stride=1, padding=1) ) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_6a(nn.Module): def __init__(self, conv_block=None): super(Mixed_6a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( conv_block(320, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=3, stride=1, padding=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block17, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(1088, 128, kernel_size=1, stride=1), conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) ) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class 
Mixed_7a(nn.Module): def __init__(self, conv_block=None): super(Mixed_7a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=2) ) self.branch2 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=1, padding=1), conv_block(288, 320, kernel_size=3, stride=2) ) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, no_relu=False, conv_block=None): super(Block8, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(2080, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) ) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) self.relu = None if no_relu else nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if self.relu is not None: out = self.relu(out) return out class InceptionResnetV2(nn.Module): def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg', norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionResnetV2, self).__init__() self.num_classes = num_classes self.num_features = 1536 assert output_stride == 32 conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b(conv_block=conv_block) self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] self.mixed_6a = Mixed_6a(conv_block=conv_block) self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)]) self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] self.mixed_7a = Mixed_7a(conv_block=conv_block) self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)]) self.block8 = Block8(no_relu=True, conv_block=conv_block) self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] self.global_pool, self.head_drop, self.classif = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, 
coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('classif',)) def _matcher(name): if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): return 1 elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): return len(module_map) + 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.classif def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classif(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) default_cfgs = generate_default_cfgs({ # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz 'inception_resnet_v2.tf_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', }, # As per https://arxiv.org/abs/1705.07204 and # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz 'inception_resnet_v2.tf_ens_adv_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', } }) @register_model def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k', })
pytorch-image-models/timm/models/inception_resnet_v2.py/0
{ "file_path": "pytorch-image-models/timm/models/inception_resnet_v2.py", "repo_id": "pytorch-image-models", "token_count": 6015 }
179
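`inception_resnet_v2` follows the same registry pattern; its default config expects 299x299 inputs and the classifier is the `classif` linear layer, which `reset_classifier` rebuilds for transfer learning. A short, hypothetical usage sketch under the same assumption (a `timm` version containing this module):

```python
# Hypothetical usage sketch; assumes a timm version that registers
# inception_resnet_v2 as defined in inception_resnet_v2.py above.
import torch
import timm

model = timm.create_model('inception_resnet_v2', pretrained=False)
model.eval()

x = torch.randn(1, 3, 299, 299)             # default_cfgs use 299x299 inputs
with torch.no_grad():
    logits = model(x)                        # (1, 1000) from the 'classif' head

# Rebuild the head for fine-tuning on a smaller label set.
model.reset_classifier(num_classes=10, global_pool='avg')
with torch.no_grad():
    print(model(x).shape)                    # torch.Size([1, 10])
```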
""" pnasnet5large implementation grabbed from Cadene's pretrained models Additional credit to https://github.com/creafz https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py """ from collections import OrderedDict from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['PNASNet5Large'] class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d( in_channels, out_channels, kernel_size=1, padding=padding) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d( in_channels, middle_channels, kernel_size, stride=stride, padding=padding) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) self.act_2 = nn.ReLU() self.separable_2 = SeparableConv2d( middle_channels, out_channels, kernel_size, stride=1, padding=padding) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class FactorizedReduction(nn.Module): def __init__(self, in_channels, out_channels, padding=''): super(FactorizedReduction, self).__init__() self.act = nn.ReLU() self.path_1 = nn.Sequential(OrderedDict([ ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), ])) self.path_2 = nn.Sequential(OrderedDict([ ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), ])) self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act(x) x_path1 = self.path_1(x) x_path2 = self.path_2(x) out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) return out class CellBase(nn.Module): def cell_forward(self, x_left, x_right): x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = 
self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) x_comb_iter_3_right = self.comb_iter_3_right(x_right) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_left) if self.comb_iter_4_right is not None: x_comb_iter_4_right = self.comb_iter_4_right(x_right) else: x_comb_iter_4_right = x_right x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem0(CellBase): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(CellStem0, self).__init__() self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables( in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) self.comb_iter_0_right = nn.Sequential(OrderedDict([ ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), ])) self.comb_iter_1_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) self.comb_iter_2_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) self.comb_iter_2_right = BranchSeparables( out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) self.comb_iter_3_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) self.comb_iter_4_left = BranchSeparables( in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) self.comb_iter_4_right = ActConvBn( out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) def forward(self, x_left): x_right = self.conv_1x1(x_left) x_out = self.cell_forward(x_left, x_right) return x_out class Cell(CellBase): def __init__( self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', is_reduction=False, match_prev_layer_dims=False, ): super(Cell, self).__init__() # If `is_reduction` is set to `True` stride 2 is used for # convolution and pooling layers to reduce the spatial size of # the output of a cell approximately by a factor of 2. stride = 2 if is_reduction else 1 # If `match_prev_layer_dimensions` is set to `True` # `FactorizedReduction` is used to reduce the spatial size # of the left input of a cell approximately by a factor of 2. 
self.match_prev_layer_dimensions = match_prev_layer_dims if match_prev_layer_dims: self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) else: self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables( out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_1_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_2_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) self.comb_iter_2_right = BranchSeparables( out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_4_left = BranchSeparables( out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) if is_reduction: self.comb_iter_4_right = ActConvBn( out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) else: self.comb_iter_4_right = None def forward(self, x_left, x_right): x_left = self.conv_prev_1x1(x_left) x_right = self.conv_1x1(x_right) x_out = self.cell_forward(x_left, x_right) return x_out class PNASNet5Large(nn.Module): def __init__( self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type='', ): super(PNASNet5Large, self).__init__() self.num_classes = num_classes self.num_features = 4320 assert output_stride == 32 self.conv_0 = ConvNormAct( in_chans, 96, kernel_size=3, stride=2, padding=0, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0( in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) self.cell_stem_1 = Cell( in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, match_prev_layer_dims=True, is_reduction=True) self.cell_0 = Cell( in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, match_prev_layer_dims=True) self.cell_1 = Cell( in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_2 = Cell( in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_3 = Cell( in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_4 = Cell( in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, is_reduction=True) self.cell_5 = Cell( in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, match_prev_layer_dims=True) self.cell_6 = Cell( in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) self.cell_7 = Cell( in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) self.cell_8 = Cell( in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, is_reduction=True) self.cell_9 = Cell( in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, 
match_prev_layer_dims=True) self.cell_10 = Cell( in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) self.cell_11 = Cell( in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) self.act = nn.ReLU() self.feature_info = [ dict(num_chs=96, reduction=2, module='conv_0'), dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), dict(num_chs=4320, reduction=32, module='act'), ] self.global_pool, self.head_drop, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem=r'^conv_0|cell_stem_[01]', blocks=r'^cell_(\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv_0 = self.conv_0(x) x_stem_0 = self.cell_stem_0(x_conv_0) x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) x_cell_0 = self.cell_0(x_stem_0, x_stem_1) x_cell_1 = self.cell_1(x_stem_1, x_cell_0) x_cell_2 = self.cell_2(x_cell_0, x_cell_1) x_cell_3 = self.cell_3(x_cell_1, x_cell_2) x_cell_4 = self.cell_4(x_cell_2, x_cell_3) x_cell_5 = self.cell_5(x_cell_3, x_cell_4) x_cell_6 = self.cell_6(x_cell_4, x_cell_5) x_cell_7 = self.cell_7(x_cell_5, x_cell_6) x_cell_8 = self.cell_8(x_cell_6, x_cell_7) x_cell_9 = self.cell_9(x_cell_7, x_cell_8) x_cell_10 = self.cell_10(x_cell_8, x_cell_9) x_cell_11 = self.cell_11(x_cell_9, x_cell_10) x = self.act(x_cell_11) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_pnasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( PNASNet5Large, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model **kwargs, ) default_cfgs = generate_default_cfgs({ 'pnasnet5large.tf_in1k': { 'hf_hub_id': 'timm/', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv_0.conv', 'classifier': 'last_linear', }, }) @register_model def pnasnet5large(pretrained=False, **kwargs) -> PNASNet5Large: r"""PNASNet-5 model architecture from the `"Progressive Neural Architecture Search" <https://arxiv.org/abs/1712.00559>`_ paper. """ model_kwargs = dict(pad_type='same', **kwargs) return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs)
pytorch-image-models/timm/models/pnasnet.py/0
{ "file_path": "pytorch-image-models/timm/models/pnasnet.py", "repo_id": "pytorch-image-models", "token_count": 7653 }
180
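`pnasnet5large` is registered with `pad_type='same'` and a 331x331 default input size; because each cell consumes two earlier feature maps, feature extraction is wired through hooks (`feature_cls='hook'`) rather than by rewriting the module graph. A brief, hypothetical usage sketch under the same assumptions as above:

```python
# Hypothetical usage sketch; assumes a timm version that registers
# pnasnet5large as defined in pnasnet.py above.
import torch
import timm

model = timm.create_model('pnasnet5large', pretrained=False)
model.eval()

x = torch.randn(1, 3, 331, 331)              # default cfg: input_size=(3, 331, 331)
with torch.no_grad():
    logits = model(x)                         # (1, 1000) from 'last_linear'

# Hook-based multi-scale features, matching the feature_info entries above.
backbone = timm.create_model('pnasnet5large', pretrained=False, features_only=True)
with torch.no_grad():
    for f in backbone(x):
        print(f.shape)                        # 96/270/1080/2160/4320 channels, reductions 2-32
```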
""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/abs/2111.09883 Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # -------------------------------------------------------- # Swin Transformer V2 # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ze Liu # -------------------------------------------------------- import math from typing import Callable, Optional, Tuple, Union, Set, Dict import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, ClassifierHead,\ resample_patch_embed, ndgrid, get_act_layer, LayerType from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['SwinTransformerV2'] # model_registry will add each entrypoint fn to this _int_or_tuple_2_t = Union[int, Tuple[int, int]] def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor: """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor: """ Args: windows: (num_windows * B, window_size[0], window_size[1], C) window_size (Tuple[int, int]): Window size img_size (Tuple[int, int]): Image size Returns: x: (B, H, W, C) """ H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowAttention(nn.Module): r""" Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 pretrained_window_size (tuple[int]): The height and width of the window in pre-training. 
""" def __init__( self, dim: int, window_size: Tuple[int, int], num_heads: int, qkv_bias: bool = True, attn_drop: float = 0., proj_drop: float = 0., pretrained_window_size: Tuple[int, int] = (0, 0), ) -> None: super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.pretrained_window_size = pretrained_window_size self.num_heads = num_heads self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) # mlp to generate continuous relative position bias self.cpb_mlp = nn.Sequential( nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) ) # get relative_coords_table relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0]).to(torch.float32) relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1]).to(torch.float32) relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w)) relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) else: relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2( torch.abs(relative_coords_table) + 1.0) / math.log2(8) self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(ndgrid(coords_h, coords_w)) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer("relative_position_index", relative_position_index, persistent=False) self.qkv = nn.Linear(dim, dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(dim)) self.register_buffer('k_bias', torch.zeros(dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(dim)) else: self.q_bias = None self.k_bias = None self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.softmax = nn.Softmax(dim=-1) def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # cosine attention attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) logit_scale = torch.clamp(self.logit_scale, max=math.log(1. 
/ 0.01)).exp() attn = attn * logit_scale relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: num_win = mask.shape[0] attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2Block(nn.Module): """ Swin Transformer Block. """ def __init__( self, dim: int, input_resolution: _int_or_tuple_2_t, num_heads: int, window_size: _int_or_tuple_2_t = 7, shift_size: _int_or_tuple_2_t = 0, mlp_ratio: float = 4., qkv_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., act_layer: LayerType = "gelu", norm_layer: nn.Module = nn.LayerNorm, pretrained_window_size: _int_or_tuple_2_t = 0, ) -> None: """ Args: dim: Number of input channels. input_resolution: Input resolution. num_heads: Number of attention heads. window_size: Window size. shift_size: Shift size for SW-MSA. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. proj_drop: Dropout rate. attn_drop: Attention dropout rate. drop_path: Stochastic depth rate. act_layer: Activation layer. norm_layer: Normalization layer. pretrained_window_size: Window size in pretraining. """ super().__init__() self.dim = dim self.input_resolution = to_2tuple(input_resolution) self.num_heads = num_heads ws, ss = self._calc_window_shift(window_size, shift_size) self.window_size: Tuple[int, int] = ws self.shift_size: Tuple[int, int] = ss self.window_area = self.window_size[0] * self.window_size[1] self.mlp_ratio = mlp_ratio act_layer = get_act_layer(act_layer) self.attn = WindowAttention( dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, pretrained_window_size=to_2tuple(pretrained_window_size), ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() if any(self.shift_size): # calculate attention mask for SW-MSA H, W = self.input_resolution img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 cnt = 0 for h in ( slice(0, -self.window_size[0]), slice(-self.window_size[0], -self.shift_size[0]), slice(-self.shift_size[0], None)): for w in ( slice(0, -self.window_size[1]), slice(-self.window_size[1], -self.shift_size[1]), slice(-self.shift_size[1], None)): img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask, persistent=False) def _calc_window_shift(self, target_window_size: _int_or_tuple_2_t, target_shift_size: _int_or_tuple_2_t) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) target_shift_size = to_2tuple(target_shift_size) window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] return tuple(window_size), tuple(shift_size) def _attn(self, x: torch.Tensor) -> torch.Tensor: B, H, W, C = x.shape # cyclic shift has_shift = any(self.shift_size) if has_shift: shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) else: shifted_x = x # partition windows x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) shifted_x = window_reverse(attn_windows, self.window_size, self.input_resolution) # B H' W' C # reverse cyclic shift if has_shift: x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) else: x = shifted_x return x def forward(self, x: torch.Tensor) -> torch.Tensor: B, H, W, C = x.shape x = x + self.drop_path1(self.norm1(self._attn(x))) x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): """ Patch Merging Layer. """ def __init__(self, dim: int, out_dim: Optional[int] = None, norm_layer: nn.Module = nn.LayerNorm) -> None: """ Args: dim (int): Number of input channels. out_dim (int): Number of output channels (or 2 * dim if None) norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ super().__init__() self.dim = dim self.out_dim = out_dim or 2 * dim self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) self.norm = norm_layer(self.out_dim) def forward(self, x: torch.Tensor) -> torch.Tensor: B, H, W, C = x.shape _assert(H % 2 == 0, f"x height ({H}) is not even.") _assert(W % 2 == 0, f"x width ({W}) is not even.") x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.reduction(x) x = self.norm(x) return x class SwinTransformerV2Stage(nn.Module): """ A Swin Transformer V2 Stage. 
""" def __init__( self, dim: int, out_dim: int, input_resolution: _int_or_tuple_2_t, depth: int, num_heads: int, window_size: _int_or_tuple_2_t, downsample: bool = False, mlp_ratio: float = 4., qkv_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., act_layer: Union[str, Callable] = 'gelu', norm_layer: nn.Module = nn.LayerNorm, pretrained_window_size: _int_or_tuple_2_t = 0, output_nchw: bool = False, ) -> None: """ Args: dim: Number of input channels. out_dim: Number of output channels. input_resolution: Input resolution. depth: Number of blocks. num_heads: Number of attention heads. window_size: Local window size. downsample: Use downsample layer at start of the block. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. proj_drop: Projection dropout rate attn_drop: Attention dropout rate. drop_path: Stochastic depth rate. act_layer: Activation layer type. norm_layer: Normalization layer. pretrained_window_size: Local window size in pretraining. output_nchw: Output tensors on NCHW format instead of NHWC. """ super().__init__() self.dim = dim self.input_resolution = input_resolution self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution self.depth = depth self.output_nchw = output_nchw self.grad_checkpointing = False window_size = to_2tuple(window_size) shift_size = tuple([w // 2 for w in window_size]) # patch merging / downsample layer if downsample: self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer) else: assert dim == out_dim self.downsample = nn.Identity() # build blocks self.blocks = nn.ModuleList([ SwinTransformerV2Block( dim=out_dim, input_resolution=self.output_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else shift_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, act_layer=act_layer, norm_layer=norm_layer, pretrained_window_size=pretrained_window_size, ) for i in range(depth)]) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.downsample(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x) else: x = blk(x) return x def _init_respostnorm(self) -> None: for blk in self.blocks: nn.init.constant_(blk.norm1.bias, 0) nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) class SwinTransformerV2(nn.Module): """ Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/abs/2111.09883 """ def __init__( self, img_size: _int_or_tuple_2_t = 224, patch_size: int = 4, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), num_heads: Tuple[int, ...] = (3, 6, 12, 24), window_size: _int_or_tuple_2_t = 7, mlp_ratio: float = 4., qkv_bias: bool = True, drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0.1, act_layer: Union[str, Callable] = 'gelu', norm_layer: Callable = nn.LayerNorm, pretrained_window_sizes: Tuple[int, ...] = (0, 0, 0, 0), **kwargs, ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of input image channels. num_classes: Number of classes for classification head. embed_dim: Patch embedding dimension. 
depths: Depth of each Swin Transformer stage (layer). num_heads: Number of attention heads in different layers. window_size: Window size. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. drop_rate: Head dropout rate. proj_drop_rate: Projection dropout rate. attn_drop_rate: Attention dropout rate. drop_path_rate: Stochastic depth rate. norm_layer: Normalization layer. act_layer: Activation layer type. patch_norm: If True, add normalization after patch embedding. pretrained_window_sizes: Pretrained window sizes of each layer. output_fmt: Output tensor format if not None, otherwise output 'NHWC' by default. """ super().__init__() self.num_classes = num_classes assert global_pool in ('', 'avg') self.global_pool = global_pool self.output_fmt = 'NHWC' self.num_layers = len(depths) self.embed_dim = embed_dim self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.feature_info = [] if not isinstance(embed_dim, (tuple, list)): embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim[0], norm_layer=norm_layer, output_fmt='NHWC', ) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] layers = [] in_dim = embed_dim[0] scale = 1 for i in range(self.num_layers): out_dim = embed_dim[i] layers += [SwinTransformerV2Stage( dim=in_dim, out_dim=out_dim, input_resolution=( self.patch_embed.grid_size[0] // scale, self.patch_embed.grid_size[1] // scale), depth=depths[i], downsample=i > 0, num_heads=num_heads[i], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], act_layer=act_layer, norm_layer=norm_layer, pretrained_window_size=pretrained_window_sizes[i], )] in_dim = out_dim if i > 0: scale *= 2 self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')] self.layers = nn.Sequential(*layers) self.norm = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) self.apply(self._init_weights) for bly in self.layers: bly._init_respostnorm() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): nod = set() for n, m in self.named_modules(): if any([kw in n for kw in ("cpb_mlp", "logit_scale")]): nod.add(n) return nod @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^absolute_pos_embed|patch_embed', # stem and embed blocks=r'^layers\.(\d+)' if coarse else [ (r'^layers\.(\d+).downsample', (0,)), (r'^layers\.(\d+)\.\w+\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.layers: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = 
self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) native_checkpoint = 'head.fc.weight' in state_dict out_dict = {} import re for k, v in state_dict.items(): if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]): continue # skip buffers that should not be persistent if 'patch_embed.proj.weight' in k: _, _, H, W = model.patch_embed.proj.weight.shape if v.shape[-2] != H or v.shape[-1] != W: v = resample_patch_embed( v, (H, W), interpolation='bicubic', antialias=True, verbose=True, ) if not native_checkpoint: # skip layer remapping for updated checkpoints k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer_v2(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( SwinTransformerV2, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth', ), 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth', ), 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'swinv2_tiny_window8_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth', ), 'swinv2_tiny_window16_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth', ), 'swinv2_small_window8_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth', ), 'swinv2_small_window16_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth', ), 'swinv2_base_window8_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth', ), 
'swinv2_base_window16_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth', ), 'swinv2_base_window12_192.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth', num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6) ), 'swinv2_large_window12_192.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth', num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6) ), }) @register_model def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2( 'swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2( 'swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2( 'swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 
'swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer_v2( 'swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k', 'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k', 'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k', 'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k', 'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k', 'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k', })
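# Usage sketch: a minimal example of instantiating one of the SwinTransformerV2
# variants registered above through timm's model registry. It assumes `timm`
# (with this module) and `torch` are installed; the model name and the fixed
# 256x256 input size are taken from the default_cfgs / @register_model entries
# defined above, everything else is illustrative rather than definitive.
import torch
import timm

model = timm.create_model('swinv2_tiny_window8_256', pretrained=False)
model.eval()

x = torch.randn(1, 3, 256, 256)            # default cfg uses input_size=(3, 256, 256), fixed_input_size=True
with torch.no_grad():
    logits = model(x)                      # classification logits, shape (1, 1000)
    feats = model.forward_features(x)      # NHWC feature map, e.g. (1, 8, 8, 768) for the tiny variant
print(logits.shape, feats.shape)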
pytorch-image-models/timm/models/swin_transformer_v2.py/0
{ "file_path": "pytorch-image-models/timm/models/swin_transformer_v2.py", "repo_id": "pytorch-image-models", "token_count": 16934 }
181
""" Cross-Covariance Image Transformer (XCiT) in PyTorch Paper: - https://arxiv.org/abs/2106.09681 Same as the official implementation, with some minor adaptations, original copyright below - https://github.com/facebookresearch/xcit/blob/master/xcit.py Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. import math from functools import partial import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, to_2tuple from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .cait import ClassAttn from .vision_transformer import Mlp __all__ = ['Xcit'] # model_registry will add each entrypoint fn to this @register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method class PositionalEncodingFourier(nn.Module): """ Positional encoding relying on a fourier kernel matching the one used in the "Attention is all you Need" paper. Based on the official XCiT code - https://github.com/facebookresearch/xcit/blob/master/xcit.py """ def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim self.eps = 1e-6 def forward(self, B: int, H: int, W: int): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype y_embed = torch.arange(1, H + 1, device=device).to(torch.float32).unsqueeze(1).repeat(1, 1, W) x_embed = torch.arange(1, W + 1, device=device).to(torch.float32).repeat(1, H, 1) y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale dim_t = torch.arange(self.hidden_dim, device=device).to(torch.float32) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos.to(dtype)) return pos.repeat(B, 1, 1, 1) # (B, C, H, W) def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution + batch norm""" return torch.nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), nn.BatchNorm2d(out_planes) ) class ConvPatchEmbed(nn.Module): """Image to Patch Embedding using multiple convolutional layers""" def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): super().__init__() img_size = to_2tuple(img_size) num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches if patch_size == 16: self.proj = torch.nn.Sequential( conv3x3(in_chans, embed_dim // 8, 2), act_layer(), conv3x3(embed_dim // 8, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2), ) elif patch_size == 8: self.proj = 
torch.nn.Sequential( conv3x3(in_chans, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2), ) else: raise('For convolutional projection, patch size has to be in [8, 16]') def forward(self, x): x = self.proj(x) Hp, Wp = x.shape[2], x.shape[3] x = x.flatten(2).transpose(1, 2) # (B, N, C) return x, (Hp, Wp) class LPI(nn.Module): """ Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable 3x3 convolutions with GeLU and BatchNorm2d """ def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): super().__init__() out_features = out_features or in_features padding = kernel_size // 2 self.conv1 = torch.nn.Conv2d( in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) self.act = act_layer() self.bn = nn.BatchNorm2d(in_features) self.conv2 = torch.nn.Conv2d( in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) def forward(self, x, H: int, W: int): B, N, C = x.shape x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.conv1(x) x = self.act(x) x = self.bn(x) x = self.conv2(x) x = x.reshape(B, C, N).permute(0, 2, 1) return x class ClassAttentionBlock(nn.Module): """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) if eta is not None: # LayerScale Initialization (no layerscale when None) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) else: self.gamma1, self.gamma2 = 1.0, 1.0 # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 self.tokens_norm = tokens_norm def forward(self, x): x_norm1 = self.norm1(x) x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) x = x + self.drop_path(self.gamma1 * x_attn) if self.tokens_norm: x = self.norm2(x) else: x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) x_res = x cls_token = x[:, 0:1] cls_token = self.gamma2 * self.mlp(cls_token) x = torch.cat([cls_token, x[:, 1:]], dim=1) x = x_res + self.drop_path(x) return x class XCA(nn.Module): """ Cross-Covariance Attention (XCA) Operation where the channels are updated using a weighted sum. 
The weights are obtained from the (softmax normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) """ def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) # Paper section 3.2 l2-Normalization and temperature scaling q = torch.nn.functional.normalize(q, dim=-1) k = torch.nn.functional.normalize(k, dim=-1) attn = (q @ k.transpose(-2, -1)) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) # (B, H, C', N), permute -> (B, N, H, C') x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class XCABlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., ): super().__init__() self.norm1 = norm_layer(dim) self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm3 = norm_layer(dim) self.local_mp = LPI(in_features=dim, act_layer=act_layer) self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma3 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) def forward(self, x, H: int, W: int): x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) return x class Xcit(nn.Module): """ Based on timm and DeiT code bases https://github.com/rwightman/pytorch-image-models/tree/master/timm https://github.com/facebookresearch/deit/ """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, ): """ Args: img_size (int, tuple): input image size patch_size (int): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP pos_drop_rate: position embedding dropout rate proj_drop_rate (float): projection dropout rate 
attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate (constant across all layers) norm_layer: (nn.Module): normalization layer cls_attn_layers: (int) Depth of Class attention layers use_pos_embed: (bool) whether to use positional encoding eta: (float) layerscale initialization value tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA Notes: - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch interaction (class LPI) and the patch embedding (class ConvPatchEmbed) """ super().__init__() assert global_pool in ('', 'avg', 'token') img_size = to_2tuple(img_size) assert (img_size[0] % patch_size == 0) and (img_size[0] % patch_size == 0), \ '`patch_size` should divide image dimensions evenly' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim self.global_pool = global_pool self.grad_checkpointing = False self.patch_embed = ConvPatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer, ) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_pos_embed: self.pos_embed = PositionalEncodingFourier(dim=embed_dim) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) self.blocks = nn.ModuleList([ XCABlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, ) for _ in range(depth)]) self.cls_attn_blocks = nn.ModuleList([ ClassAttentionBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=drop_rate, attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm, ) for _ in range(cls_attn_layers)]) # Classifier head self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # Init weights trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=r'^blocks\.(\d+)', cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): B = x.shape[0] # x is (B, N, C). 
(Hp, Hw) is (height in units of patches, width in units of patches) x, (Hp, Wp) = self.patch_embed(x) if self.pos_embed is not None: # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = self.pos_drop(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, Hp, Wp) else: x = blk(x, Hp, Wp) x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) for blk in self.cls_attn_blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] # For consistency with timm's transformer models while being compatible with official weights source we rename # pos_embeder to pos_embed. Also account for use_pos_embed == False use_pos_embed = getattr(model, 'pos_embed', None) is not None pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] for k in pos_embed_keys: if use_pos_embed: state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) else: del state_dict[k] # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): num_ca_blocks = len(model.cls_attn_blocks) for i in range(num_ca_blocks): qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) for j, subscript in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) if qkv_bias is not None: qkv_bias = qkv_bias.reshape(3, -1) for j, subscript in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] return state_dict def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Cross-Covariance Image Transformers models.') model = build_model_with_cfg( Xcit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # Patch size 16 'xcit_nano_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), 'xcit_nano_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), 'xcit_nano_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', 
input_size=(3, 384, 384)), 'xcit_tiny_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), 'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), 'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), 'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), 'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), 'xcit_small_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), 'xcit_small_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), 'xcit_small_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), 'xcit_small_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), 'xcit_medium_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), 'xcit_medium_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), 'xcit_large_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), 'xcit_large_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), # Patch size 8 'xcit_nano_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), 'xcit_nano_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), 'xcit_nano_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), 'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), 'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', 
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), 'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), 'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), 'xcit_small_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), 'xcit_small_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), 'xcit_small_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), 'xcit_small_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), 'xcit_medium_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), 'xcit_medium_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), 'xcit_large_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), 'xcit_large_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), }) @register_model def xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384) model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def 
xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model # Patch size 8x8 models @register_model def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> 
Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { # Patch size 16 'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k', 'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k', 'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k', 'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k', 'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k', 'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k', 
'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k', 'xcit_small_12_p16_384_dist': 'xcit_small_12_p16_384.fb_dist_in1k', 'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k', 'xcit_small_24_p16_384_dist': 'xcit_small_24_p16_384.fb_dist_in1k', 'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k', 'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k', 'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k', 'xcit_large_24_p16_384_dist': 'xcit_large_24_p16_384.fb_dist_in1k', # Patch size 8 'xcit_nano_12_p8_224_dist': 'xcit_nano_12_p8_224.fb_dist_in1k', 'xcit_nano_12_p8_384_dist': 'xcit_nano_12_p8_384.fb_dist_in1k', 'xcit_tiny_12_p8_224_dist': 'xcit_tiny_12_p8_224.fb_dist_in1k', 'xcit_tiny_12_p8_384_dist': 'xcit_tiny_12_p8_384.fb_dist_in1k', 'xcit_tiny_24_p8_224_dist': 'xcit_tiny_24_p8_224.fb_dist_in1k', 'xcit_tiny_24_p8_384_dist': 'xcit_tiny_24_p8_384.fb_dist_in1k', 'xcit_small_12_p8_224_dist': 'xcit_small_12_p8_224.fb_dist_in1k', 'xcit_small_12_p8_384_dist': 'xcit_small_12_p8_384.fb_dist_in1k', 'xcit_small_24_p8_224_dist': 'xcit_small_24_p8_224.fb_dist_in1k', 'xcit_small_24_p8_384_dist': 'xcit_small_24_p8_384.fb_dist_in1k', 'xcit_medium_24_p8_224_dist': 'xcit_medium_24_p8_224.fb_dist_in1k', 'xcit_medium_24_p8_384_dist': 'xcit_medium_24_p8_384.fb_dist_in1k', 'xcit_large_24_p8_224_dist': 'xcit_large_24_p8_224.fb_dist_in1k', 'xcit_large_24_p8_384_dist': 'xcit_large_24_p8_384.fb_dist_in1k', })
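# Usage sketch: a small example of creating one of the XCiT variants registered
# above via timm's registry. It assumes `timm` (with this module) and `torch`
# are installed; the model name and the fixed 224x224 input size come from the
# default_cfgs above. Note that `features_only=True` is explicitly rejected in
# _create_xcit, so only classification / forward_features usage is shown.
import torch
import timm

model = timm.create_model('xcit_nano_12_p16_224', pretrained=False)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)                      # (1, 1000) classification logits
    tokens = model.forward_features(x)     # (1, 1 + 14*14, 128): cls token followed by patch tokens
print(logits.shape, tokens.shape)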
pytorch-image-models/timm/models/xcit.py/0
{ "file_path": "pytorch-image-models/timm/models/xcit.py", "repo_id": "pytorch-image-models", "token_count": 18692 }
182
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2021 Ross Wightman """ import logging from itertools import islice from typing import Optional, Callable, Tuple import torch import torch.nn as nn import torch.optim as optim from timm.models import group_parameters from .adabelief import AdaBelief from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .adan import Adan from .lamb import Lamb from .lars import Lars from .lion import Lion from .lookahead import Lookahead from .madgrad import MADGRAD from .nadam import Nadam from .nadamw import NAdamW from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from .sgdw import SGDW _logger = logging.getLogger(__name__) # optimizers to default to multi-tensor _DEFAULT_FOREACH = { 'lion', } def param_groups_weight_decay( model: nn.Module, weight_decay=1e-5, no_weight_decay_list=() ): no_weight_decay_list = set(no_weight_decay_list) decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def _group(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def _layer_map(model, layers_per_group=12, num_groups=None): def _in_head(n, hp): if not hp: return True elif isinstance(hp, (tuple, list)): return any([n.startswith(hpi) for hpi in hp]) else: return n.startswith(hp) head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None) names_trunk = [] names_head = [] for n, _ in model.named_parameters(): names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n) # group non-head layers num_trunk_layers = len(names_trunk) if num_groups is not None: layers_per_group = -(num_trunk_layers // -num_groups) names_trunk = list(_group(names_trunk, layers_per_group)) num_trunk_groups = len(names_trunk) layer_map = {n: i for i, l in enumerate(names_trunk) for n in l} layer_map.update({n: num_trunk_groups for n in names_head}) return layer_map def param_groups_layer_decay( model: nn.Module, weight_decay: float = 0.05, no_weight_decay_list: Tuple[str] = (), layer_decay: float = .75, end_layer_decay: Optional[float] = None, verbose: bool = False, ): """ Parameter groups for layer-wise lr decay & weight decay Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58 """ no_weight_decay_list = set(no_weight_decay_list) param_group_names = {} # NOTE for debugging param_groups = {} if hasattr(model, 'group_matcher'): # FIXME interface needs more work layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True) else: # fallback layer_map = _layer_map(model) num_layers = max(layer_map.values()) + 1 layer_max = num_layers - 1 layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers)) for name, param in model.named_parameters(): if not param.requires_grad: continue # no decay: all 1D parameters and model specific ones if param.ndim == 1 or name in no_weight_decay_list: g_decay = "no_decay" this_decay = 0. 
else: g_decay = "decay" this_decay = weight_decay layer_id = layer_map.get(name, layer_max) group_name = "layer_%d_%s" % (layer_id, g_decay) if group_name not in param_groups: this_scale = layer_scales[layer_id] param_group_names[group_name] = { "lr_scale": this_scale, "weight_decay": this_decay, "param_names": [], } param_groups[group_name] = { "lr_scale": this_scale, "weight_decay": this_decay, "params": [], } param_group_names[group_name]["param_names"].append(name) param_groups[group_name]["params"].append(param) if verbose: import json _logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) return list(param_groups.values()) def optimizer_kwargs(cfg): """ cfg/argparse to kwargs helper Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. """ kwargs = dict( opt=cfg.opt, lr=cfg.lr, weight_decay=cfg.weight_decay, momentum=cfg.momentum, ) if getattr(cfg, 'opt_eps', None) is not None: kwargs['eps'] = cfg.opt_eps if getattr(cfg, 'opt_betas', None) is not None: kwargs['betas'] = cfg.opt_betas if getattr(cfg, 'layer_decay', None) is not None: kwargs['layer_decay'] = cfg.layer_decay if getattr(cfg, 'opt_args', None) is not None: kwargs.update(cfg.opt_args) if getattr(cfg, 'opt_foreach', None) is not None: kwargs['foreach'] = cfg.opt_foreach return kwargs def create_optimizer(args, model, filter_bias_and_bn=True): """ Legacy optimizer factory for backwards compatibility. NOTE: Use create_optimizer_v2 for new code. """ return create_optimizer_v2( model, **optimizer_kwargs(cfg=args), filter_bias_and_bn=filter_bias_and_bn, ) def create_optimizer_v2( model_or_params, opt: str = 'sgd', lr: Optional[float] = None, weight_decay: float = 0., momentum: float = 0.9, foreach: Optional[bool] = None, filter_bias_and_bn: bool = True, layer_decay: Optional[float] = None, param_group_fn: Optional[Callable] = None, **kwargs, ): """ Create an optimizer. TODO currently the model is passed in and all parameters are selected for optimization. For more general use an interface that allows selection of parameters to optimize and lr groups, one of: * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion * expose the parameters interface and leave it up to caller Args: model_or_params (nn.Module): model containing parameters to optimize opt: name of optimizer to create lr: initial learning rate weight_decay: weight decay to apply in optimizer momentum: momentum for momentum based optimizers (others may use betas via kwargs) foreach: Enable / disable foreach (multi-tensor) operation if True / False. Choose safe default if None filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay **kwargs: extra optimizer specific kwargs to pass through Returns: Optimizer """ if isinstance(model_or_params, nn.Module): # a model was passed in, extract parameters and add weight decays to appropriate layers no_weight_decay = {} if hasattr(model_or_params, 'no_weight_decay'): no_weight_decay = model_or_params.no_weight_decay() if param_group_fn: parameters = param_group_fn(model_or_params) elif layer_decay is not None: parameters = param_groups_layer_decay( model_or_params, weight_decay=weight_decay, layer_decay=layer_decay, no_weight_decay_list=no_weight_decay, ) weight_decay = 0. elif weight_decay and filter_bias_and_bn: parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay) weight_decay = 0. 
else: parameters = model_or_params.parameters() else: # iterable of parameters or param groups passed in parameters = model_or_params opt_lower = opt.lower() opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower.startswith('fused'): try: from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD has_apex = True except ImportError: has_apex = False assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' if opt_lower.startswith('bnb'): try: import bitsandbytes as bnb has_bnb = True except ImportError: has_bnb = False assert has_bnb and torch.cuda.is_available(), 'bitsandbytes and CUDA required for bnb optimizers' opt_args = dict(weight_decay=weight_decay, **kwargs) if lr is not None: opt_args.setdefault('lr', lr) if foreach is None: if opt in _DEFAULT_FOREACH: opt_args.setdefault('foreach', True) else: opt_args['foreach'] = foreach # basic SGD & related if opt_lower == 'sgd' or opt_lower == 'nesterov': # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'sgdw' or opt_lower == 'nesterovw': # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons opt_args.pop('eps', None) optimizer = SGDW(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'momentumw': opt_args.pop('eps', None) optimizer = SGDW(parameters, momentum=momentum, nesterov=False, **opt_args) # adaptive elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'nadam': try: # NOTE PyTorch >= 1.10 should have native NAdam optimizer = optim.Nadam(parameters, **opt_args) except AttributeError: optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'nadamw': optimizer = NAdamW(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamax': optimizer = optim.Adamax(parameters, **opt_args) elif opt_lower == 'adabelief': optimizer = AdaBelief(parameters, rectify=False, **opt_args) elif opt_lower == 'radabelief': optimizer = AdaBelief(parameters, rectify=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adagrad': opt_args.setdefault('eps', 1e-8) optimizer = optim.Adagrad(parameters, **opt_args) elif opt_lower == 'adafactor': optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adanp': optimizer = Adan(parameters, no_prox=False, **opt_args) elif opt_lower == 'adanw': optimizer = Adan(parameters, no_prox=True, **opt_args) elif opt_lower == 'lamb': optimizer = Lamb(parameters, **opt_args) elif opt_lower == 'lambc': optimizer = Lamb(parameters, trust_clip=True, **opt_args) elif opt_lower == 'larc': optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args) elif opt_lower == 'lars': optimizer = Lars(parameters, momentum=momentum, **opt_args) elif opt_lower == 'nlarc': optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args) elif opt_lower == 
'nlars': optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'madgrad': optimizer = MADGRAD(parameters, momentum=momentum, **opt_args) elif opt_lower == 'madgradw': optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args) elif opt_lower == 'novograd' or opt_lower == 'nvnovograd': optimizer = NvNovoGrad(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) elif opt_lower == 'lion': opt_args.pop('eps', None) optimizer = Lion(parameters, **opt_args) # second order elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) # NVIDIA fused optimizers, require APEX to be installed elif opt_lower == 'fusedsgd': opt_args.pop('eps', None) optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'fusedmomentum': opt_args.pop('eps', None) optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) elif opt_lower == 'fusedadam': optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) elif opt_lower == 'fusedadamw': optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) elif opt_lower == 'fusedlamb': optimizer = FusedLAMB(parameters, **opt_args) elif opt_lower == 'fusednovograd': opt_args.setdefault('betas', (0.95, 0.98)) optimizer = FusedNovoGrad(parameters, **opt_args) # bitsandbytes optimizers, require bitsandbytes to be installed elif opt_lower == 'bnbsgd': opt_args.pop('eps', None) optimizer = bnb.optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'bnbsgd8bit': opt_args.pop('eps', None) optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, nesterov=True, **opt_args) elif opt_lower == 'bnbmomentum': opt_args.pop('eps', None) optimizer = bnb.optim.SGD(parameters, momentum=momentum, **opt_args) elif opt_lower == 'bnbmomentum8bit': opt_args.pop('eps', None) optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, **opt_args) elif opt_lower == 'bnbadam': optimizer = bnb.optim.Adam(parameters, **opt_args) elif opt_lower == 'bnbadam8bit': optimizer = bnb.optim.Adam8bit(parameters, **opt_args) elif opt_lower == 'bnbadamw': optimizer = bnb.optim.AdamW(parameters, **opt_args) elif opt_lower == 'bnbadamw8bit': optimizer = bnb.optim.AdamW8bit(parameters, **opt_args) elif opt_lower == 'bnblamb': optimizer = bnb.optim.LAMB(parameters, **opt_args) elif opt_lower == 'bnblamb8bit': optimizer = bnb.optim.LAMB8bit(parameters, **opt_args) elif opt_lower == 'bnblars': optimizer = bnb.optim.LARS(parameters, **opt_args) elif opt_lower == 'bnblarsb8bit': optimizer = bnb.optim.LAMB8bit(parameters, **opt_args) elif opt_lower == 'bnblion': optimizer = bnb.optim.Lion(parameters, **opt_args) elif opt_lower == 'bnblion8bit': optimizer = bnb.optim.Lion8bit(parameters, **opt_args) else: assert False and "Invalid optimizer" raise ValueError if len(opt_split) > 1: if opt_split[0] == 'lookahead': optimizer = Lookahead(optimizer) return optimizer
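# Usage sketch: a minimal example of the factory above, assuming this module is
# importable via `timm.optim` (which re-exports create_optimizer_v2). Any
# nn.Module can be passed; with the default filter_bias_and_bn=True, biases and
# other 1-d parameters (e.g. BatchNorm weights) land in a zero-weight-decay
# parameter group, while the remaining weights keep the requested decay.
import torch.nn as nn
from timm.optim import create_optimizer_v2

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.Flatten(),
    nn.Linear(16 * 32 * 32, 10),
)

optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
print([(len(g['params']), g['weight_decay']) for g in optimizer.param_groups])

# The opt string may also carry a 'lookahead_' prefix (e.g. 'lookahead_adamw'),
# which wraps the base optimizer in Lookahead as handled at the end of
# create_optimizer_v2 above.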
pytorch-image-models/timm/optim/optim_factory.py/0
{ "file_path": "pytorch-image-models/timm/optim/optim_factory.py", "repo_id": "pytorch-image-models", "token_count": 6927 }
183
""" Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 Ross Wightman """ import glob import operator import os import logging import torch from .model import unwrap_model, get_state_dict _logger = logging.getLogger(__name__) class CheckpointSaver: def __init__( self, model, optimizer, args=None, model_ema=None, amp_scaler=None, checkpoint_prefix='checkpoint', recovery_prefix='recovery', checkpoint_dir='', recovery_dir='', decreasing=False, max_history=10, unwrap_fn=unwrap_model): # objects to save state_dicts of self.model = model self.optimizer = optimizer self.args = args self.model_ema = model_ema self.amp_scaler = amp_scaler # state self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness self.best_epoch = None self.best_metric = None self.curr_recovery_file = '' self.last_recovery_file = '' # config self.checkpoint_dir = checkpoint_dir self.recovery_dir = recovery_dir self.save_prefix = checkpoint_prefix self.recovery_prefix = recovery_prefix self.extension = '.pth.tar' self.decreasing = decreasing # a lower metric is better if True self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs self.max_history = max_history self.unwrap_fn = unwrap_fn assert self.max_history >= 1 def save_checkpoint(self, epoch, metric=None): assert epoch >= 0 tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) self._save(tmp_save_path, epoch, metric) if os.path.exists(last_save_path): os.unlink(last_save_path) # required for Windows support. os.rename(tmp_save_path, last_save_path) worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None if (len(self.checkpoint_files) < self.max_history or metric is None or self.cmp(metric, worst_file[1])): if len(self.checkpoint_files) >= self.max_history: self._cleanup_checkpoints(1) filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension save_path = os.path.join(self.checkpoint_dir, filename) os.link(last_save_path, save_path) self.checkpoint_files.append((save_path, metric)) self.checkpoint_files = sorted( self.checkpoint_files, key=lambda x: x[1], reverse=not self.decreasing) # sort in descending order if a lower metric is not better checkpoints_str = "Current checkpoints:\n" for c in self.checkpoint_files: checkpoints_str += ' {}\n'.format(c) _logger.info(checkpoints_str) if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): self.best_epoch = epoch self.best_metric = metric best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) if os.path.exists(best_save_path): os.unlink(best_save_path) os.link(last_save_path, best_save_path) return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) def _save(self, save_path, epoch, metric=None): save_state = { 'epoch': epoch, 'arch': type(self.model).__name__.lower(), 'state_dict': get_state_dict(self.model, self.unwrap_fn), 'optimizer': self.optimizer.state_dict(), 'version': 2, # version < 2 increments epoch before save } if self.args is not None: save_state['arch'] = self.args.model save_state['args'] = self.args if self.amp_scaler is not None: save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() if self.model_ema is not None: save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) if metric is not None: 
save_state['metric'] = metric torch.save(save_state, save_path) def _cleanup_checkpoints(self, trim=0): trim = min(len(self.checkpoint_files), trim) delete_index = self.max_history - trim if delete_index < 0 or len(self.checkpoint_files) <= delete_index: return to_delete = self.checkpoint_files[delete_index:] for d in to_delete: try: _logger.debug("Cleaning checkpoint: {}".format(d)) os.remove(d[0]) except Exception as e: _logger.error("Exception '{}' while deleting checkpoint".format(e)) self.checkpoint_files = self.checkpoint_files[:delete_index] def save_recovery(self, epoch, batch_idx=0): assert epoch >= 0 filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension save_path = os.path.join(self.recovery_dir, filename) self._save(save_path, epoch) if os.path.exists(self.last_recovery_file): try: _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) os.remove(self.last_recovery_file) except Exception as e: _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) self.last_recovery_file = self.curr_recovery_file self.curr_recovery_file = save_path def find_recovery(self): recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) files = glob.glob(recovery_path + '*' + self.extension) files = sorted(files) return files[0] if len(files) else ''
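# Illustrative usage sketch (assumed training-loop wiring; the constructor arguments and
# method names come from the class above, everything else is a placeholder).
import os
import torch
from timm.utils import CheckpointSaver

model = torch.nn.Linear(16, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
os.makedirs('./output', exist_ok=True)
saver = CheckpointSaver(
    model,
    optimizer,
    checkpoint_dir='./output',  # hypothetical output directory
    decreasing=True,            # e.g. tracking validation loss, where lower is better
    max_history=3,
)
for epoch in range(3):
    val_loss = 1.0 / (epoch + 1)  # placeholder metric
    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=val_loss)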
pytorch-image-models/timm/utils/checkpoint_saver.py/0
{ "file_path": "pytorch-image-models/timm/utils/checkpoint_saver.py", "repo_id": "pytorch-image-models", "token_count": 2818 }
184
#!/usr/bin/env python3 """ ImageNet Validation Script This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit. Hacked together by Ross Wightman (https://github.com/rwightman) """ import argparse import csv import glob import json import logging import os import time from collections import OrderedDict from contextlib import suppress from functools import partial import torch import torch.nn as nn import torch.nn.parallel from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet from timm.layers import apply_test_time_pool, set_fast_norm from timm.models import create_model, load_checkpoint, is_model, list_models from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser, \ decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model try: from apex import amp has_apex = True except ImportError: has_apex = False has_native_amp = False try: if getattr(torch.cuda.amp, 'autocast') is not None: has_native_amp = True except AttributeError: pass try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') _logger = logging.getLogger('validate') parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('data', nargs='?', metavar='DIR', const=None, help='path to dataset (*deprecated*, use --data-dir)') parser.add_argument('--data-dir', metavar='DIR', help='path to dataset (root dir)') parser.add_argument('--dataset', metavar='NAME', default='', help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)') parser.add_argument('--split', metavar='NAME', default='validation', help='dataset split (default: validation)') parser.add_argument('--num-samples', default=None, type=int, metavar='N', help='Manually specify num samples in dataset split, for IterableDatasets.') parser.add_argument('--dataset-download', action='store_true', default=False, help='Allow download of dataset for torch/ and tfds/ datasets that support it.') parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', help='path to class to idx mapping file (default: "")') parser.add_argument('--input-key', default=None, type=str, help='Dataset key for input images.') parser.add_argument('--input-img-mode', default=None, type=str, help='Dataset image conversion mode for input images.') parser.add_argument('--target-key', default=None, type=str, help='Dataset key for target labels.') parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', help='model architecture (default: dpn92)') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--in-chans', type=int, default=None, metavar='N', help='Image input channels (default: None => 3)') parser.add_argument('--input-size', default=None, 
nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') parser.add_argument('--use-train-size', action='store_true', default=False, help='force use of train input size, even when test size is specified in pretrained cfg') parser.add_argument('--crop-pct', default=None, type=float, metavar='N', help='Input image center crop pct') parser.add_argument('--crop-mode', default=None, type=str, metavar='N', help='Input image crop mode (squash, border, center). Model default if None.') parser.add_argument('--crop-border-pixels', type=int, default=None, help='Crop pixels from image border.') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('--num-classes', type=int, default=None, help='Number classes in dataset') parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') parser.add_argument('--log-freq', default=10, type=int, metavar='N', help='batch logging frequency (default: 10)') parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--num-gpu', type=int, default=1, help='Number of GPUS to use') parser.add_argument('--test-pool', dest='test_pool', action='store_true', help='enable test time pool') parser.add_argument('--no-prefetcher', action='store_true', default=False, help='disable fast prefetcher') parser.add_argument('--pin-mem', action='store_true', default=False, help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--device', default='cuda', type=str, help="Device (accelerator) to use.") parser.add_argument('--amp', action='store_true', default=False, help='use NVIDIA Apex AMP or Native AMP for mixed precision training') parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16)') parser.add_argument('--amp-impl', default='native', type=str, help='AMP impl to use, "native" or "apex" (default: native)') parser.add_argument('--tf-preprocessing', action='store_true', default=False, help='Use Tensorflow preprocessing pipeline (require CPU TF installed') parser.add_argument('--use-ema', dest='use_ema', action='store_true', help='use ema version of weights if present') parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. 
One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) scripting_group = parser.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', default=False, action='store_true', help='torch.jit.script the full model') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help="Enable compilation w/ specified backend (default: inductor).") scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help="Enable AOT Autograd support.") parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', help='Output csv file for validation results (summary)') parser.add_argument('--results-format', default='csv', type=str, help='Format for results file one of (csv, json) (default: csv).') parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME', help='Real labels JSON file for imagenet evaluation') parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME', help='Valid label indices txt file for validation of partial label space') parser.add_argument('--retry', default=False, action='store_true', help='Enable batch size decay & retry for single model validation') def validate(args): # might as well try to validate something args.pretrained = args.pretrained or not args.checkpoint args.prefetcher = not args.no_prefetcher if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True device = torch.device(args.device) # resolve AMP arguments based on PyTorch / Apex availability use_amp = None amp_autocast = suppress if args.amp: if args.amp_impl == 'apex': assert has_apex, 'AMP impl specified as APEX but APEX is not installed.' assert args.amp_dtype == 'float16' use_amp = 'apex' _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') else: assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).' assert args.amp_dtype in ('float16', 'bfloat16') use_amp = 'native' amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16 amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype) _logger.info('Validating in mixed precision with native PyTorch AMP.') else: _logger.info('Validating in float32. AMP not enabled.') if args.fuser: set_jit_fuser(args.fuser) if args.fast_norm: set_fast_norm() # create model in_chans = 3 if args.in_chans is not None: in_chans = args.in_chans elif args.input_size is not None: in_chans = args.input_size[0] model = create_model( args.model, pretrained=args.pretrained, num_classes=args.num_classes, in_chans=in_chans, global_pool=args.gp, scriptable=args.torchscript, **args.model_kwargs, ) if args.num_classes is None: assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' 
args.num_classes = model.num_classes if args.checkpoint: load_checkpoint(model, args.checkpoint, args.use_ema) if args.reparam: model = reparameterize_model(model) param_count = sum([m.numel() for m in model.parameters()]) _logger.info('Model %s created, param count: %d' % (args.model, param_count)) data_config = resolve_data_config( vars(args), model=model, use_test_size=not args.use_train_size, verbose=True, ) test_time_pool = False if args.test_pool: model, test_time_pool = apply_test_time_pool(model, data_config) model = model.to(device) if args.channels_last: model = model.to(memory_format=torch.channels_last) if args.torchscript: assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model' model = torch.jit.script(model) elif args.torchcompile: assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.' torch._dynamo.reset() model = torch.compile(model, backend=args.torchcompile) elif args.aot_autograd: assert has_functorch, "functorch is needed for --aot-autograd" model = memory_efficient_fusion(model) if use_amp == 'apex': model = amp.initialize(model, opt_level='O1') if args.num_gpu > 1: model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) criterion = nn.CrossEntropyLoss().to(device) root_dir = args.data or args.data_dir if args.input_img_mode is None: input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L' else: input_img_mode = args.input_img_mode dataset = create_dataset( root=root_dir, name=args.dataset, split=args.split, download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map, num_samples=args.num_samples, input_key=args.input_key, input_img_mode=input_img_mode, target_key=args.target_key, ) if args.valid_labels: with open(args.valid_labels, 'r') as f: valid_labels = [int(line.rstrip()) for line in f] else: valid_labels = None if args.real_labels: real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels) else: real_labels = None crop_pct = 1.0 if test_time_pool else data_config['crop_pct'] loader = create_loader( dataset, input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=args.prefetcher, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=crop_pct, crop_mode=data_config['crop_mode'], crop_border_pixels=args.crop_border_pixels, pin_memory=args.pin_mem, device=device, tf_preprocessing=args.tf_preprocessing, ) batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() model.eval() with torch.no_grad(): # warmup, reduce variability of first batch time, especially for comparing torchscript vs non input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) with amp_autocast(): model(input) end = time.time() for batch_idx, (input, target) in enumerate(loader): if args.no_prefetcher: target = target.to(device) input = input.to(device) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) # compute output with amp_autocast(): output = model(input) if valid_labels is not None: output = output[:, valid_labels] loss = criterion(output, target) if real_labels is not None: real_labels.add_result(output) # measure accuracy and record loss acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5)) losses.update(loss.item(), input.size(0)) 
top1.update(acc1.item(), input.size(0)) top5.update(acc5.item(), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if batch_idx % args.log_freq == 0: _logger.info( 'Test: [{0:>4d}/{1}] ' 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' 'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) ' 'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format( batch_idx, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, loss=losses, top1=top1, top5=top5 ) ) if real_labels is not None: # real labels mode replaces topk values at the end top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5) else: top1a, top5a = top1.avg, top5.avg results = OrderedDict( model=args.model, top1=round(top1a, 4), top1_err=round(100 - top1a, 4), top5=round(top5a, 4), top5_err=round(100 - top5a, 4), param_count=round(param_count / 1e6, 2), img_size=data_config['input_size'][-1], crop_pct=crop_pct, interpolation=data_config['interpolation'], ) _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format( results['top1'], results['top1_err'], results['top5'], results['top5_err'])) return results def _try_run(args, initial_batch_size): batch_size = initial_batch_size results = OrderedDict() error_str = 'Unknown' while batch_size: args.batch_size = batch_size * args.num_gpu # multiply by num-gpu for DataParallel case try: if torch.cuda.is_available() and 'cuda' in args.device: torch.cuda.empty_cache() results = validate(args) return results except RuntimeError as e: error_str = str(e) _logger.error(f'"{error_str}" while running validation.') if not check_batch_size_retry(error_str): break batch_size = decay_batch_step(batch_size) _logger.warning(f'Reducing batch size to {batch_size} for retry.') results['error'] = error_str _logger.error(f'{args.model} failed to validate ({error_str}).') return results _NON_IN1K_FILTERS = ['*_in21k', '*_in22k', '*in12k', '*_dino', '*fcmae', '*seer'] def main(): setup_default_logging() args = parser.parse_args() model_cfgs = [] model_names = [] if os.path.isdir(args.checkpoint): # validate all checkpoints in a path with same model checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') checkpoints += glob.glob(args.checkpoint + '/*.pth') model_names = list_models(args.model) model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] else: if args.model == 'all': # validate all models in a list of names with pretrained checkpoints args.pretrained = True model_names = list_models( pretrained=True, exclude_filters=_NON_IN1K_FILTERS, ) model_cfgs = [(n, '') for n in model_names] elif not is_model(args.model): # model name doesn't exist, try as wildcard filter model_names = list_models( args.model, pretrained=True, ) model_cfgs = [(n, '') for n in model_names] if not model_cfgs and os.path.isfile(args.model): with open(args.model) as f: model_names = [line.rstrip() for line in f] model_cfgs = [(n, None) for n in model_names if n] if len(model_cfgs): _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) results = [] try: initial_batch_size = args.batch_size for m, c in model_cfgs: args.model = m args.checkpoint = c r = _try_run(args, initial_batch_size) if 'error' in r: continue if args.checkpoint: r['checkpoint'] = args.checkpoint results.append(r) except KeyboardInterrupt as e: pass results = sorted(results, key=lambda x: x['top1'], reverse=True) else: if args.retry: results = 
_try_run(args, args.batch_size) else: results = validate(args) if args.results_file: write_results(args.results_file, results, format=args.results_format) # output results in JSON to stdout w/ delimiter for runner script print(f'--result\n{json.dumps(results, indent=4)}') def write_results(results_file, results, format='csv'): with open(results_file, mode='w') as cf: if format == 'json': json.dump(results, cf, indent=4) else: if not isinstance(results, (list, tuple)): results = [results] if not results: return dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == '__main__': main()
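# Illustrative invocation sketch (paths and model names below are placeholders): the script
# is normally run from the command line, e.g.
#   python validate.py /path/to/imagenet --model resnet50 --pretrained --batch-size 128 --amp
# but the same flow can be driven programmatically through the `parser` and `validate` above.
args = parser.parse_args([
    '/path/to/imagenet',        # hypothetical ImageNet-style validation folder
    '--model', 'resnet50',
    '--pretrained',
    '--batch-size', '128',
])
results = validate(args)
print(results['top1'], results['top1_err'], results['param_count'])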
pytorch-image-models/validate.py/0
{ "file_path": "pytorch-image-models/validate.py", "repo_id": "pytorch-image-models", "token_count": 9310 }
185
Hugging Face Optimized Inference License 1.0 (HFOILv1.0) This License Agreement governs the use of the Software and its Modifications. It is a binding agreement between the Licensor and You. This License Agreement shall be referred to as Hugging Face Optimized Inference License 1.0 or HFOILv1.0. We may publish revised versions of this License Agreement from time to time. Each version will be given a distinguished number. By downloading, accessing, modifying, distributing or otherwise using the Software, You consent to all of the terms and conditions below. So, if You do not agree with those, please do not download, access, modify, distribute, or use the Software. 1. PERMISSIONS You may use, modify and distribute the Software pursuant to the following terms and conditions: Copyright License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare, publicly display, publicly perform, sublicense under the terms herein, and distribute the Software and Modifications of the Software. Patent License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free patent license to make, have made, Use, offer to sell, sell, import, and otherwise transfer the Software, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Software to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Software or a Contribution incorporated within the Software constitutes direct or contributory patent infringement, then any rights granted to You under this License Agreement for the Software shall terminate as of the date such litigation is filed. No other rights. All rights not expressly granted herein are retained. 2. RESTRICTIONS You may not distribute the Software as a hosted or managed, and paid service, where the service grants users access to any substantial set of the features or functionality of the Software. If you wish to do so, You will need to be granted additional rights from the Licensor which will be subject to a separate mutually agreed agreement. You may not sublicense the Software under any other terms than those listed in this License. 3. OBLIGATIONS When You modify the Software, You agree to: - attach a notice stating the Modifications of the Software You made; and - attach a notice stating that the Modifications of the Software are released under this License Agreement. When You distribute the Software or Modifications of the Software, You agree to: - give any recipients of the Software a copy of this License Agreement; - retain all Explanatory Documentation; and if sharing the Modifications of the Software, add Explanatory Documentation documenting the changes made to create the Modifications of the Software; - retain all copyright, patent, trademark and attribution notices. 4. MISCELLANEOUS Termination. Licensor reserves the right to restrict Use of the Software in violation of this License Agreement, upon which Your licenses will automatically terminate. Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Software by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. Trademarks and related. Nothing in this License Agreement permits You (i) to make Use of Licensorsโ€™ trademarks, trade names, or logos, (ii) otherwise suggest endorsement by Licensor, or (iii) misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. Output You generate. Licensor claims no rights in the Output. You agree not to contravene any provision as stated in the License Agreement with your Use of the Output. Disclaimer of Warranty. Except as expressly provided otherwise herein, and to the fullest extent permitted by law, Licensor provides the Software (and each Contributor provides its Contributions) AS IS, and Licensor disclaims all warranties or guarantees of any kind, express or implied, whether arising under any law or from any usage in trade, or otherwise including but not limited to the implied warranties of merchantability, non-infringement, quiet enjoyment, fitness for a particular purpose, or otherwise. You are solely responsible for determining the appropriateness of the Software and Modifications of the Software for your purposes (including your use or distribution of the Software and Modifications of the Software), and assume any risks associated with Your exercise of permissions under this License Agreement. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License Agreement or out of the Use or inability to Use the Software (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, model failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. Accepting Warranty or Additional Liability. While sharing the Software or Modifications of the Software thereof, You may choose to offer and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License Agreement. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of Licensor or any other Contributor, and you hereby agree to indemnify, defend, and hold Licensor and each other Contributor (and their successors or assigns) harmless for any liability incurred by, or claims asserted against, such Licensor or Contributor (and their successors or assigns) by reason of your accepting any such warranty or additional liability. Severability. This License Agreement is a license of copyright and patent rights and an agreement in contract between You and the Licensor. 
If any provision of this License Agreement is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 5. DEFINITIONS โ€œContributionโ€ refers to any work of authorship, including the original version of the Software and any Modifications of the Software that is intentionally submitted to Licensor for inclusion in the Software by the copyright owner or by an individual or entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, โ€œsubmittedโ€ means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Software, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as โ€œNot a Contribution.โ€ โ€œContributorโ€ refers to Licensor and any individual or entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Software. โ€œDataโ€ refers to a collection of information extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License Agreement. โ€œExplanatory Documentationโ€ refers to any documentation or related information including but not limited to model cards or data cards dedicated to inform the public about the characteristics of the Software. Explanatory documentation is not licensed under this License. "License Agreement" refers to these terms and conditions. โ€œLicensorโ€ refers to the rights owners or entity authorized by the rights owners that are granting the terms and conditions of this License Agreement. โ€œModelโ€ refers to machine-learning based assemblies (including checkpoints), consisting of learnt weights and parameters (including optimizer states), corresponding to a model architecture as embodied in Software source code. Source code is not licensed under this License Agreement. โ€œModifications of the Softwareโ€ refers to all changes to the Software, including without limitation derivative works of the Software. โ€œOutputโ€ refers to the results of operating the Software. โ€œShareโ€ refers to any transmission, reproduction, publication or other sharing of the Software or Modifications of the Software to a third party, including providing the Softwaire as a hosted service made available by electronic or other remote means, including - but not limited to - API-based or web access. โ€œSoftwareโ€ refers to the software and Model (or parts of either) that Licensor makes available under this License Agreement. โ€œThird Partiesโ€ refers to individuals or legal entities that are not under common control with Licensor or You. โ€œUseโ€ refers to anything You or your representatives do with the Software, including but not limited to generating any Output, fine tuning, updating, running, training, evaluating and/or reparametrizing the Model. "You" (or "Your") refers to an individual or Legal Entity exercising permissions granted by this License Agreement and/or making Use of the Software for whichever purpose and in any field of Use.
text-generation-inference/LICENSE/0
{ "file_path": "text-generation-inference/LICENSE", "repo_id": "text-generation-inference", "token_count": 2207 }
186
# Text Generation

The Hugging Face Text Generation Python library provides a convenient way of interfacing with a
`text-generation-inference` instance running on
[Hugging Face Inference Endpoints](https://huggingface.co/inference-endpoints) or on the Hugging Face Hub.

## Get Started

### Install

```shell
pip install text-generation
```

### Inference API Usage

```python
from text_generation import InferenceAPIClient

client = InferenceAPIClient("bigscience/bloomz")
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

or with the asynchronous client:

```python
from text_generation import InferenceAPIAsyncClient

client = InferenceAPIAsyncClient("bigscience/bloomz")
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

Check all currently deployed models on the Hugging Face Inference API with `Text Generation` support:

```python
from text_generation.inference_api import deployed_models
print(deployed_models())
```

### Hugging Face Inference Endpoint usage

```python
from text_generation import Client

endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"

client = Client(endpoint_url)
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

or with the asynchronous client:

```python
from text_generation import AsyncClient

endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"

client = AsyncClient(endpoint_url)
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'

# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
    if not response.token.special:
        text += response.token.text

print(text)
# ' Rayleigh scattering'
```

### Types

```python
# Request Parameters
class Parameters:
    # Activate logits sampling
    do_sample: bool
    # Maximum number of generated tokens
    max_new_tokens: int
    # The parameter for repetition penalty. 1.0 means no penalty.
    # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    repetition_penalty: Optional[float]
    # Whether to prepend the prompt to the generated text
    return_full_text: bool
    # Stop generating tokens if a member of `stop_sequences` is generated
    stop: List[str]
    # Random sampling seed
    seed: Optional[int]
    # The value used to modulate the logits distribution.
    temperature: Optional[float]
    # The number of highest probability vocabulary tokens to keep for top-k-filtering.
    top_k: Optional[int]
    # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
    # higher are kept for generation.
    top_p: Optional[float]
    # Truncate input tokens to the given size
    truncate: Optional[int]
    # Typical Decoding mass
    # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
    typical_p: Optional[float]
    # Generate best_of sequences and return the one with the highest token logprobs
    best_of: Optional[int]
    # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
    watermark: bool
    # Get decoder input token logprobs and ids
    decoder_input_details: bool
    # Return the N most likely tokens at each step
    top_n_tokens: Optional[int]

# Decoder input tokens
class InputToken:
    # Token ID from the model tokenizer
    id: int
    # Token text
    text: str
    # Logprob
    # Optional since the logprob of the first token cannot be computed
    logprob: Optional[float]

# Generated tokens
class Token:
    # Token ID from the model tokenizer
    id: int
    # Token text
    text: str
    # Logprob
    logprob: float
    # Is the token a special token
    # Can be used to ignore tokens when concatenating
    special: bool

# Generation finish reason
class FinishReason(Enum):
    # number of generated tokens == `max_new_tokens`
    Length = "length"
    # the model generated its end of sequence token
    EndOfSequenceToken = "eos_token"
    # the model generated a text included in `stop_sequences`
    StopSequence = "stop_sequence"

# Additional sequences when using the `best_of` parameter
class BestOfSequence:
    # Generated text
    generated_text: str
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]
    # Decoder input tokens, empty if decoder_input_details is False
    prefill: List[InputToken]
    # Generated tokens
    tokens: List[Token]
    # Most likely tokens
    top_tokens: Optional[List[List[Token]]]

# `generate` details
class Details:
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]
    # Decoder input tokens, empty if decoder_input_details is False
    prefill: List[InputToken]
    # Generated tokens
    tokens: List[Token]
    # Most likely tokens
    top_tokens: Optional[List[List[Token]]]
    # Additional sequences when using the `best_of` parameter
    best_of_sequences: Optional[List[BestOfSequence]]

# `generate` return value
class Response:
    # Generated text
    generated_text: str
    # Generation details
    details: Details

# `generate_stream` details
class StreamDetails:
    # Generation finish reason
    finish_reason: FinishReason
    # Number of generated tokens
    generated_tokens: int
    # Sampling seed if sampling was activated
    seed: Optional[int]

# `generate_stream` return value
class StreamResponse:
    # Generated token
    token: Token
    # Most likely tokens
    top_tokens: Optional[List[Token]]
    # Complete generated text
    # Only available when the generation is finished
    generated_text: Optional[str]
    # Generation details
    # Only available when the generation is finished
    details: Optional[StreamDetails]

# Inference API currently deployed model
class DeployedModel:
    model_id: str
    sha: str
```
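These types map onto the keyword arguments and return values of the client's `generate` and `generate_stream` methods. Below is a rough sketch of putting them together; the endpoint URL is a placeholder, and the exact keyword set can vary between library versions:

```python
from text_generation import Client

client = Client("https://YOUR_ENDPOINT.endpoints.huggingface.cloud")

# request parameters are passed as keyword arguments
response = client.generate(
    "Why is the sky blue?",
    max_new_tokens=32,
    temperature=0.7,
    top_p=0.95,
    stop_sequences=["\n\n"],
    decoder_input_details=True,
)

# the result is a `Response` with `generated_text` and a `Details` object
print(response.generated_text)
print(response.details.finish_reason, response.details.generated_tokens)
```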
text-generation-inference/clients/python/README.md/0
{ "file_path": "text-generation-inference/clients/python/README.md", "repo_id": "text-generation-inference", "token_count": 2193 }
187
# Consuming Text Generation Inference

There are many ways to consume the Text Generation Inference server in your applications. After launching, you can use the `/generate` route and make a `POST` request to get results from the server. You can also use the `/generate_stream` route if you want TGI to return a stream of tokens. You can make the requests using the tool of your preference, such as curl, Python or TypeScript. For a final end-to-end experience, we also open-sourced ChatUI, a chat interface for open-source models.

## curl

After the launch, you can query the model using either the `/generate` or `/generate_stream` routes:

```bash
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```

## Inference Client

[`huggingface-hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library to interact with the Hugging Face Hub, including its endpoints. It provides a nice high-level class, [`~huggingface_hub.InferenceClient`], which makes it easy to make calls to a TGI endpoint. `InferenceClient` also takes care of parameter validation and provides a simple-to-use interface.

You can install the `huggingface-hub` package with pip.

```bash
pip install huggingface-hub
```

Once you start the TGI server, instantiate `InferenceClient()` with the URL to the endpoint serving the model. You can then call `text_generation()` to hit the endpoint through Python.

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="http://127.0.0.1:8080")
client.text_generation(prompt="Write a code for snake game")
```

You can do streaming with `InferenceClient` by passing `stream=True`. Streaming will return tokens as they are being generated in the server. To use streaming, you can do as follows:

```python
for token in client.text_generation("How do you make cheese?", max_new_tokens=12, stream=True):
    print(token)
```

Another parameter you can use with the TGI backend is `details`. You can get more details on generation (tokens, probabilities, etc.) by setting `details` to `True`. When it's specified, TGI will return a `TextGenerationResponse` or `TextGenerationStreamResponse` rather than a string or stream.

```python
output = client.text_generation(prompt="Meaning of life is", details=True)
print(output)

# TextGenerationResponse(generated_text=' a complex concept that is not always clear to the individual. It is a concept that is not always', details=Details(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=20, seed=None, prefill=[], tokens=[Token(id=267, text=' a', logprob=-2.0723474, special=False), Token(id=11235, text=' complex', logprob=-3.1272552, special=False), Token(id=17908, text=' concept', logprob=-1.3632495, special=False),..))
```

You can see how to stream below.

```python
output = client.text_generation(prompt="Meaning of life is", stream=True, details=True)
print(next(iter(output)))

# TextGenerationStreamResponse(token=Token(id=267, text=' a', logprob=-2.0723474, special=False), generated_text=None, details=None)
```

You can check out the details of the function [here](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation). There is also an async version of the client, `AsyncInferenceClient`, based on `asyncio` and `aiohttp`.
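A minimal sketch of the asynchronous client, assuming the same local endpoint as above, could look like this:

```python
import asyncio
from huggingface_hub import AsyncInferenceClient

client = AsyncInferenceClient(model="http://127.0.0.1:8080")

async def main():
    # same API surface as InferenceClient, but awaitable
    text = await client.text_generation(prompt="Write a code for snake game")
    print(text)

asyncio.run(main())
```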
You can find docs for it [here](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.AsyncInferenceClient).

## ChatUI

ChatUI is an open-source interface built for LLM serving. It offers many customization options, such as web search with SERP API and more. ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces.

To serve both ChatUI and TGI in the same environment, simply add your own endpoints to the `MODELS` variable in the `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served.

```
{
// rest of the model config here
"endpoints": [{"url": "https://HOST:PORT/generate_stream"}]
}
```

![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chatui_screen.png)

## Gradio

Gradio is a Python library that helps you build web applications for your machine learning models with a few lines of code. It has a `ChatInterface` wrapper that helps create neat UIs for chatbots. Let's take a look at how to create a chatbot with streaming mode using TGI and Gradio. Let's install Gradio and the Hub Python library first.

```bash
pip install huggingface-hub gradio
```

Assuming you are serving your model on port 8080, we will query it through [InferenceClient](consuming_tgi#inference-client).

```python
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient(model="http://127.0.0.1:8080")

def inference(message, history):
    partial_message = ""
    for token in client.text_generation(message, max_new_tokens=20, stream=True):
        partial_message += token
        yield partial_message

gr.ChatInterface(
    inference,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Chat with me!", container=False, scale=7),
    description="This is the demo for Gradio UI consuming TGI endpoint with LLaMA 7B-Chat model.",
    title="Gradio 🤝 TGI",
    examples=["Are tomatoes vegetables?"],
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
).queue().launch()
```

The UI looks like this 👇

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/gradio-tgi.png" />
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/gradio-tgi-dark.png" />
</div>

You can try the demo directly here 👇

<div class="block dark:hidden">
    <iframe
        src="https://merve-gradio-tgi-2.hf.space?__theme=light"
        width="850"
        height="750"
    ></iframe>
</div>
<div class="hidden dark:block">
    <iframe
        src="https://merve-gradio-tgi-2.hf.space?__theme=dark"
        width="850"
        height="750"
    ></iframe>
</div>

You can disable streaming mode using `return` instead of `yield` in your inference function, like below.

```python
def inference(message, history):
    return client.text_generation(message, max_new_tokens=20)
```

You can read more about how to customize a `ChatInterface` [here](https://www.gradio.app/guides/creating-a-chatbot-fast).

## API documentation

You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route. The Swagger UI is also available [here](https://huggingface.github.io/text-generation-inference).
text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md", "repo_id": "text-generation-inference", "token_count": 2262 }
188
# Messages API

Text Generation Inference (TGI) now supports the Messages API, which is fully compatible with the OpenAI Chat Completion API. This feature is available starting from version 1.4.0. You can use OpenAI's client libraries or third-party libraries expecting the OpenAI schema to interact with TGI's Messages API. Below are some examples of how to utilize this compatibility.

> **Note:** The Messages API is supported from TGI version 1.4.0 and above. Ensure you are using a compatible version to access this feature.

#### Table of Contents

- [Making a Request](#making-a-request)
- [Streaming](#streaming)
- [Synchronous](#synchronous)
- [Hugging Face Inference Endpoints](#hugging-face-inference-endpoints)
- [Cloud Providers](#cloud-providers)
  - [Amazon SageMaker](#amazon-sagemaker)

## Making a Request

You can make a request to TGI's Messages API using `curl`. Here's an example:

```bash
curl localhost:3000/v1/chat/completions \
    -X POST \
    -d '{
  "model": "tgi",
  "messages": [
    {
      "role": "system",
      "content": "You are a helpful assistant."
    },
    {
      "role": "user",
      "content": "What is deep learning?"
    }
  ],
  "stream": true,
  "max_tokens": 20
}' \
    -H 'Content-Type: application/json'
```

## Streaming

You can also use OpenAI's Python client library to make a streaming request. Here's how:

```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(
    base_url="http://localhost:3000/v1",
    api_key="-"
)

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant." },
        {"role": "user", "content": "What is deep learning?"}
    ],
    stream=True
)

# iterate and print stream
for message in chat_completion:
    print(message)
```

## Synchronous

If you prefer to make a synchronous request, you can do so like this:

```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(
    base_url="http://localhost:3000/v1",
    api_key="-"
)

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant." },
        {"role": "user", "content": "What is deep learning?"}
    ],
    stream=False
)

print(chat_completion)
```

## Hugging Face Inference Endpoints

The Messages API is integrated with [Inference Endpoints](https://huggingface.co/inference-endpoints/dedicated). Every endpoint that uses "Text Generation Inference" with an LLM that has a chat template can now be used. Below is an example of how to use IE with TGI using OpenAI's Python client library:

> **Note:** Make sure to replace `base_url` with your endpoint URL and to include `v1/` at the end of the URL. The `api_key` should be replaced with your Hugging Face API key.

```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(
    # replace with your endpoint url, make sure to include "v1/" at the end
    base_url="https://vlzz10eq3fol3429.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    # replace with your API key
    api_key="hf_XXX"
)

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant." },
        {"role": "user", "content": "What is deep learning?"}
    ],
    stream=True
)

# iterate and print stream
for message in chat_completion:
    print(message.choices[0].delta.content, end="")
```

## Cloud Providers

TGI can be deployed on various cloud providers for scalable and robust text generation. One such provider is Amazon SageMaker, which has recently added support for TGI.
Here's how you can deploy TGI on Amazon SageMaker:

## Amazon SageMaker

To enable the Messages API in Amazon SageMaker you need to set the environment variable `MESSAGES_API_ENABLED=true`. This will modify the `/invocations` route to accept Messages dictionaries consisting of `role` and `content`. See the example below on how to deploy a model with the new Messages API.

```python
import json
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFaceModel, get_huggingface_llm_image_uri

try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta',
    'SM_NUM_GPUS': json.dumps(1),
    'MESSAGES_API_ENABLED': True
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"),
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,
    instance_type="ml.g5.2xlarge",
    container_startup_health_check_timeout=300,
)

# send request
predictor.predict({
    "messages": [
        {"role": "system", "content": "You are a helpful assistant." },
        {"role": "user", "content": "What is deep learning?"}
    ]
})
```
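Once the endpoint is up, you are not limited to the SageMaker Python SDK: the same `/invocations` route can also be called with the low-level `sagemaker-runtime` client. The snippet below is a sketch; the endpoint name is whatever `predictor.endpoint_name` reports for your deployment.

```python
import json
import boto3

runtime = boto3.client("sagemaker-runtime")

response = runtime.invoke_endpoint(
    EndpointName=predictor.endpoint_name,  # or the endpoint name shown in the SageMaker console
    ContentType="application/json",
    Body=json.dumps({
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is deep learning?"}
        ]
    }),
)

# the body is the same Messages API response returned by TGI
print(json.loads(response["Body"].read()))
```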
text-generation-inference/docs/source/messages_api.md/0
{ "file_path": "text-generation-inference/docs/source/messages_api.md", "repo_id": "text-generation-inference", "token_count": 1731 }
189
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5039062, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.04837036, "text": " with" }, { "id": 26680, "logprob": -3.9960938, "text": " gir" }, { "id": 1903, "logprob": -0.07525635, "text": "aff" }, { "id": 255, "logprob": -0.006790161, "text": "es" }, { "id": 23, "logprob": -1.546875, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.109375, "text": " glorious" }, { "id": 5985, "logprob": -2.09375, "text": " animal" }, { "id": 313, "logprob": -1.1845703, "text": " on" }, { "id": 248, "logprob": -0.77734375, "text": " the" }, { "id": 1936, "logprob": -2.3828125, "text": " face" }, { "id": 275, "logprob": -0.0044403076, "text": " of" }, { "id": 414, "logprob": -1.9667969, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.921875, "text": " G" }, { "id": 6013, "logprob": -2.2714844, "text": "ira" }, { "id": 694, "logprob": -0.62353516, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5507812, "text": " believes" }, { "id": 455, "logprob": -4.5625, "text": " all" }, { "id": 599, "logprob": -2.7402344, "text": " other" }, { "id": 5632, "logprob": -0.21899414, "text": " animals" }, { "id": 362, "logprob": -0.76708984, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.103515625, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6796875, "text": " glorious" }, { "id": 64398, "logprob": -1.8222656, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.3544922, "text": " the" }, { "id": 26680, "logprob": -0.24609375, "text": " gir" }, { "id": 23226, "logprob": -0.02960205, "text": "affe" }, { "id": 25, "logprob": -0.17358398, "text": "." }, { "id": 193, "logprob": -1.3925781, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.5898438, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99365234, "text": "," }, { "id": 29033, "logprob": -2.2304688, "text": " Gir" }, { "id": 1622, "logprob": -0.107788086, "text": "af" }, { "id": 249, "logprob": -0.04257202, "text": "at" }, { "id": 1480, "logprob": -0.0024871826, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056915283, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.0071105957, "text": "af" }, { "id": 249, "logprob": -0.008453369, "text": "at" }, { "id": 1480, "logprob": -0.0006928444, "text": "ron" }, { "id": 37, "logprob": -0.0074920654, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.828125, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.3178711, "special": false, "text": "," }, { "id": 8156, "logprob": -0.23925781, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.61279297, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.4177246, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.0023345947, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5283203, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007965088, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" 
}, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." }, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" 
}, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50, "logprob": null, "text": "G" }, { "id": 330, "logprob": -5.96875, "text": "ir" }, { "id": 1622, "logprob": -5.6171875, "text": "af" }, { "id": 249, "logprob": -6.5, "text": "at" }, { "id": 1480, "logprob": -8.0703125, "text": "ron" }, { "id": 304, "logprob": -2.328125, "text": " is" }, { "id": 23866, "logprob": -9.59375, "text": " obsessed" }, { "id": 335, "logprob": -0.048339844, "text": " with" }, { "id": 26680, "logprob": -4.0, "text": " gir" }, { "id": 1903, "logprob": -0.07531738, "text": "aff" }, { "id": 255, "logprob": -0.006793976, "text": "es" }, { "id": 23, "logprob": -1.5478516, "text": "," }, { "id": 248, "logprob": -4.3320312, "text": " the" }, { "id": 758, "logprob": -3.7363281, "text": " most" }, { "id": 21735, "logprob": -5.1132812, "text": " glorious" }, { "id": 5985, "logprob": -2.0957031, "text": " animal" }, { "id": 313, "logprob": -1.1835938, "text": " on" }, { "id": 248, "logprob": -0.77685547, "text": " the" }, { "id": 1936, "logprob": -2.3808594, "text": " face" }, { "id": 275, "logprob": -0.004436493, "text": " of" }, { "id": 414, "logprob": -1.9638672, "text": " this" }, { "id": 6490, "logprob": -2.0449219, "text": " Earth" }, { "id": 25, "logprob": -0.28198242, "text": "." 
}, { "id": 401, "logprob": -7.9179688, "text": " G" }, { "id": 6013, "logprob": -2.2734375, "text": "ira" }, { "id": 694, "logprob": -0.6230469, "text": "ft" }, { "id": 1480, "logprob": -0.20947266, "text": "ron" }, { "id": 9369, "logprob": -4.5546875, "text": " believes" }, { "id": 455, "logprob": -4.5703125, "text": " all" }, { "id": 599, "logprob": -2.7382812, "text": " other" }, { "id": 5632, "logprob": -0.21948242, "text": " animals" }, { "id": 362, "logprob": -0.7661133, "text": " are" }, { "id": 23981, "logprob": -4.9960938, "text": " irrelevant" }, { "id": 635, "logprob": -4.234375, "text": " when" }, { "id": 4354, "logprob": -0.5131836, "text": " compared" }, { "id": 271, "logprob": -0.10357666, "text": " to" }, { "id": 248, "logprob": -0.58447266, "text": " the" }, { "id": 21735, "logprob": -3.6816406, "text": " glorious" }, { "id": 64398, "logprob": -1.8203125, "text": " majesty" }, { "id": 275, "logprob": -0.23583984, "text": " of" }, { "id": 248, "logprob": -0.35473633, "text": " the" }, { "id": 26680, "logprob": -0.24572754, "text": " gir" }, { "id": 23226, "logprob": -0.029586792, "text": "affe" }, { "id": 25, "logprob": -0.17346191, "text": "." }, { "id": 193, "logprob": -1.3945312, "text": "\n" }, { "id": 23626, "logprob": -10.0625, "text": "Daniel" }, { "id": 37, "logprob": -4.59375, "text": ":" }, { "id": 23090, "logprob": -6.9375, "text": " Hello" }, { "id": 23, "logprob": -0.99316406, "text": "," }, { "id": 29033, "logprob": -2.2324219, "text": " Gir" }, { "id": 1622, "logprob": -0.10797119, "text": "af" }, { "id": 249, "logprob": -0.04248047, "text": "at" }, { "id": 1480, "logprob": -0.0024814606, "text": "ron" }, { "id": 12, "logprob": -1.4277344, "text": "!" }, { "id": 193, "logprob": -1.1005859, "text": "\n" }, { "id": 50, "logprob": -0.056884766, "text": "G" }, { "id": 330, "logprob": -0.1315918, "text": "ir" }, { "id": 1622, "logprob": -0.007095337, "text": "af" }, { "id": 249, "logprob": -0.00844574, "text": "at" }, { "id": 1480, "logprob": -0.00068998337, "text": "ron" }, { "id": 37, "logprob": -0.0074768066, "text": ":" } ], "seed": null, "tokens": [ { "id": 23090, "logprob": -1.8251953, "special": false, "text": " Hello" }, { "id": 23, "logprob": -0.31762695, "special": false, "text": "," }, { "id": 8156, "logprob": -0.2388916, "special": false, "text": " Daniel" }, { "id": 12, "logprob": -0.5698242, "special": false, "text": "!" }, { "id": 193, "logprob": -0.6152344, "special": false, "text": "\n" }, { "id": 23626, "logprob": -0.42211914, "special": false, "text": "Daniel" }, { "id": 37, "logprob": -0.002336502, "special": false, "text": ":" }, { "id": 1634, "logprob": -2.0605469, "special": false, "text": " What" }, { "id": 18, "logprob": -1.5292969, "special": false, "text": "'" }, { "id": 94, "logprob": -0.007926941, "special": false, "text": "s" } ] }, "generated_text": " Hello, Daniel!\nDaniel: What's" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_load.json", "repo_id": "text-generation-inference", "token_count": 21427 }
190
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2753906, "text": "Learning" }, { "id": 29973, "logprob": -0.48046875, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1845703, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.5727539, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.00010967255, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.04510498, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.00020992756, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.0046539307, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025844574, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2724609, "text": "Learning" }, { "id": 29973, "logprob": -0.47729492, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2724609, "text": "Learning" }, { "id": 29973, "logprob": -0.47729492, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -10.734375, "text": "What" }, { "id": 338, "logprob": -1.5488281, "text": "is" }, { "id": 21784, "logprob": -9.2890625, "text": "Deep" }, { "id": 29257, "logprob": -1.2724609, "text": "Learning" }, { "id": 29973, "logprob": -0.47729492, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json", "repo_id": "text-generation-inference", "token_count": 5726 }
191
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": 
null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21362305, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.44360352, "special": false, "text": " print" }, { "id": 372, "logprob": -0.54248047, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2441406, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.75878906, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.2084961, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2460938, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json", "repo_id": "text-generation-inference", "token_count": 5188 }
192
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 17, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -1.5117188, "text": " is" }, { "id": 18147, "logprob": -8.96875, "text": " Deep" }, { "id": 20727, "logprob": -1.953125, "text": " Learning" }, { "id": 32, "logprob": -0.94189453, "text": "?" } ], "seed": null, "tokens": [ { "id": 428, "logprob": -1.5830078, "special": false, "text": " -" }, { "id": 18147, "logprob": -3.3105469, "special": false, "text": " Deep" }, { "id": 20727, "logprob": -0.3215332, "special": false, "text": " Learning" }, { "id": 187, "logprob": -2.5566406, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.6074219, "special": false, "text": "Deep" }, { "id": 20727, "logprob": -0.69628906, "special": false, "text": " Learning" }, { "id": 310, "logprob": -0.6923828, "special": false, "text": " is" }, { "id": 247, "logprob": -0.5263672, "special": false, "text": " a" }, { "id": 749, "logprob": -1.8544922, "special": false, "text": " sub" }, { "id": 3423, "logprob": -0.6118164, "special": false, "text": "field" }, { "id": 273, "logprob": -0.055877686, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.0537109, "special": false, "text": " machine" }, { "id": 4715, "logprob": -0.0115737915, "special": false, "text": " learning" }, { "id": 326, "logprob": -0.9111328, "special": false, "text": " that" }, { "id": 4648, "logprob": -1.4589844, "special": false, "text": " uses" }, { "id": 13345, "logprob": -1.4853516, "special": false, "text": " artificial" }, { "id": 11454, "logprob": -0.021636963, "special": false, "text": " neural" } ] }, "generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural" }
text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json", "repo_id": "text-generation-inference", "token_count": 1691 }
193
import pytest


@pytest.fixture(scope="module")
def bloom_560_handle(launcher):
    with launcher("bigscience/bloom-560m") as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560(bloom_560_handle):
    await bloom_560_handle.health(240)
    return bloom_560_handle.client


@pytest.mark.asyncio
async def test_bloom_560m(bloom_560, response_snapshot):
    response = await bloom_560.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_all_params(bloom_560, response_snapshot):
    response = await bloom_560.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot):
    responses = await generate_load(
        bloom_560,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_bloom_560m.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_bloom_560m.py", "repo_id": "text-generation-inference", "token_count": 752 }
194
import pytest


@pytest.fixture(scope="module")
def flash_starcoder_handle(launcher):
    with launcher("bigcode/starcoder", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_starcoder(flash_starcoder_handle):
    await flash_starcoder_handle.health(300)
    return flash_starcoder_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder(flash_starcoder, response_snapshot):
    response = await flash_starcoder.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_default_params(flash_starcoder, response_snapshot):
    response = await flash_starcoder.generate(
        "def print_hello",
        max_new_tokens=60,
        temperature=0.2,
        top_p=0.95,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 60
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_load(flash_starcoder, generate_load, response_snapshot):
    responses = await generate_load(
        flash_starcoder, "def print_hello", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_starcoder.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder.py", "repo_id": "text-generation-inference", "token_count": 578 }
195
[package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[dependencies]
clap = { version = "4.4.5", features = ["derive", "env"] }
ctrlc = { version = "3.4.1", features = ["termination"] }
nix = { version = "0.28.0", features = ["signal"] }
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.107"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }

[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.20", features = ["blocking", "json"] }

[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
text-generation-inference/launcher/Cargo.toml/0
{ "file_path": "text-generation-inference/launcher/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 287 }
196
eetq_commit := 71adb5e191bb8290069a580abff0355d7b2dd5c9

eetq:
	# Clone eetq
	pip install packaging
	git clone https://github.com/NetEase-FuXi/EETQ.git eetq

build-eetq: eetq
	cd eetq && git fetch && git checkout $(eetq_commit) && git submodule update --init --recursive
	cd eetq && python setup.py build

install-eetq: build-eetq
	cd eetq && python setup.py install
text-generation-inference/server/Makefile-eetq/0
{ "file_path": "text-generation-inference/server/Makefile-eetq", "repo_id": "text-generation-inference", "token_count": 155 }
197
// Adapted from turboderp exllama: https://github.com/turboderp/exllama

#include <ATen/cuda/CUDAContext.h>
#include "q4_matrix.cuh"
#include <vector>
#include "../util.cuh"
#include "../matrix.cuh"

using namespace std;

const int UNSHUF_BLOCKSIZE_X = 64;

const int RECONS_THREADS_X = 64;  // Block size and thread count along columns in out, each thread converts 1 column
const int RECONS_THREADS_Y = 1;   // Block size and thread count along rows in x and out, each thread converts 8 rows

vector<Q4Matrix*> g_q4_matrices;

void g_q4_keep_matrix(Q4Matrix* m)
{
    g_q4_matrices.push_back(m);
}

void g_q4_free_matrices()
{
    for (const auto& m : g_q4_matrices) delete m;
    g_q4_matrices.clear();
}

Q4Matrix::Q4Matrix
(
    const int _height,
    const int _width,
    const int _groups,

    uint32_t* _qweight,
    uint32_t* _qzeros,
    half* _scales,
    uint32_t* _g_idx,

    const int _device
) :
    height(_height),
    width(_width),
    groups(_groups),
    device(_device)
{
    cudaSetDevice(device);

    cuda_qweight = _qweight;
    cuda_qzeros = _qzeros;
    cuda_scales = _scales;

    groupsize = height / groups;

    if (_g_idx) make_sequential(_g_idx);
}

Q4Matrix::~Q4Matrix()
{
}

// Make sequential

__global__ void make_sequential_kernel
(
    const uint32_t* __restrict__ w,
    uint32_t* __restrict__ w_new,
    const uint32_t* __restrict__ x_map,
    const int w_height,
    const int w_width
)
{
    const uint64_t* w2 = (uint64_t*) w;
    uint64_t* w_new2 = (uint64_t*) w_new;
    int w2_stride = w_width >> 1;

    int w2_column = UNSHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x;
    int w_new2_row = blockIdx.y;
    int x_map_idx = w_new2_row << 3;

    uint64_t dst = 0;

    #pragma unroll
    for (int i = 0; i < 8; i++)
    {
        int source_row = x_map[x_map_idx++];

        int w2_row = source_row >> 3;
        int w2_subrow = source_row & 0x07;
        int w2_row_shift = w2_subrow << 2;
        int wnew2_row_shift = i << 2;

        uint64_t src = w2[w2_row * w2_stride + w2_column];
        src >>= w2_row_shift;
        src &= 0x0000000f0000000f;
        src <<= wnew2_row_shift;
        dst |= src;
    }

    w_new2[w_new2_row * w2_stride + w2_column] = dst;
}

void Q4Matrix::make_sequential(const uint32_t* cpu_g_idx)
{
    uint32_t* cuda_new_qweight = NULL;
    cudaMalloc(&cuda_new_qweight, height / 8 * width * sizeof(uint32_t));
    cudaMalloc(&cuda_x_map, height * sizeof(uint32_t));  // TODO: Should probably be allocated in PyTorch

    uint32_t* cpu_g_idx_map = (uint32_t*) calloc(groups, sizeof(uint32_t));
    uint32_t* cpu_x_map = (uint32_t*) malloc(height * sizeof(uint32_t));
    uint32_t* cpu_x_map_inv = (uint32_t*) malloc(height * sizeof(uint32_t));

    // Group histogram

    for (int i = 0; i < height; i++) cpu_g_idx_map[cpu_g_idx[i]]++;

    // Group map

    for (int i = 0, acc = 0; i < groups; i++)
    {
        short tmp = cpu_g_idx_map[i];
        cpu_g_idx_map[i] = acc;
        acc += tmp;
    }

    // X map (inverse)

    for (int row = 0; row < height; row++)
    {
        uint32_t target_group = cpu_g_idx[row];
        uint32_t target_row = cpu_g_idx_map[target_group];
        cpu_g_idx_map[target_group]++;
        cpu_x_map_inv[row] = target_row;
    }

    // X map

    for (int row = 0; row < height; row++) cpu_x_map[cpu_x_map_inv[row]] = row;

    // Move to CUDA

    cudaMemcpyAsync(cuda_x_map, cpu_x_map, height * sizeof(uint32_t), cudaMemcpyHostToDevice);

    // Rearrange rows in w

    dim3 threads(UNSHUF_BLOCKSIZE_X, 1, 1);
    dim3 blocks(width / UNSHUF_BLOCKSIZE_X / 2, height / 8, 1);

    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    make_sequential_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, cuda_new_qweight, cuda_x_map, height / 8, width);

    // Replace qweights

    cudaMemcpyAsync(cuda_qweight, cuda_new_qweight, height / 8 * width * sizeof(uint32_t), cudaMemcpyDeviceToDevice);

    // Cleanup

    cudaDeviceSynchronize();

    cudaFree(cuda_new_qweight);
    free(cpu_g_idx_map);
    free(cpu_x_map);
    free(cpu_x_map_inv);
}

__global__ void reconstruct_kernel
(
    const uint32_t* __restrict__ w,
    half* __restrict__ out,  // (y)
    const half* __restrict__ w_scales,
    const uint32_t* __restrict__ w_zeros,
    const int height,
    const int width,
    const int groupsize
)
{
    // Start of block

    int column = RECONS_THREADS_X * blockIdx.x + threadIdx.x;
    int row = (RECONS_THREADS_Y * blockIdx.y + threadIdx.y) * 8;

    // Views

    MatrixView_q4_column w_(w, height, width);
    MatrixView_half_rw out_(out, height, width);
    MatrixView_half w_scales_(w_scales, height / groupsize, width);
    MatrixView_q4_row w_zeros_(w_zeros, height / groupsize, width);

    // Groupsize version

    int group = row / groupsize;

    half w_scale = w_scales_.item(group, column);
    uint32_t w_zero = (w_zeros_.item(group, column) + 1) & 0x0F;

    uint32_t w_read = w_.item_uint32_t(row, column);
    half* out_ptr = out_.item_ptr(row, column);

    #pragma unroll
    for (int s = 0; s < 32; s += 4)
    {
        half w_item = __hmul(__int2half_rn((int)((w_read >> s) & 0x0f) - w_zero), w_scale);
        *out_ptr = w_item;
        out_ptr += out_.width;
    }
}

void Q4Matrix::reconstruct(half* out)
{
    dim3 threads(RECONS_THREADS_X, RECONS_THREADS_Y, 1);

    dim3 blocks
    (
        (width + threads.x - 1) / threads.x,
        (height / 8 + threads.y - 1) / threads.y,
        1
    );

    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    reconstruct_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, out, cuda_scales, cuda_qzeros, height / 8, width, groupsize);
}
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu", "repo_id": "text-generation-inference", "token_count": 2592 }
198