Dataset columns: text (string, length 7 to 328k), id (string, length 14 to 166), metadata (dict), __index_level_0__ (int64, 0 to 459).
<script lang="ts"> import { createEventDispatcher } from "svelte"; import Modal from "$lib/components/Modal.svelte"; import CarbonClose from "~icons/carbon/close"; import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import { enhance } from "$app/forms"; import { base } from "$app/paths"; import { useSettingsStore } from "$lib/stores/settings"; import Switch from "$lib/components/Switch.svelte"; import { PUBLIC_APP_DATA_SHARING } from "$env/static/public"; let isConfirmingDeletion = false; const dispatch = createEventDispatcher<{ close: void }>(); let settings = useSettingsStore(); </script> <div class="flex w-full flex-col gap-5"> <div class="flex items-start justify-between text-xl font-semibold text-gray-800"> <h2>Application Settings</h2> </div> <div class="flex h-full flex-col gap-4 pt-4 max-sm:pt-0"> {#if PUBLIC_APP_DATA_SHARING === "1"} <!-- svelte-ignore a11y-label-has-associated-control --> <label class="flex items-center"> <Switch name="shareConversationsWithModelAuthors" bind:checked={$settings.shareConversationsWithModelAuthors} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2"> Share conversations with model authors </div> </label> <p class="text-sm text-gray-500"> Sharing your data will help improve the training data and make open models better over time. </p> {/if} <!-- svelte-ignore a11y-label-has-associated-control --> <label class="mt-6 flex items-center"> <Switch name="hideEmojiOnSidebar" bind:checked={$settings.hideEmojiOnSidebar} /> <div class="inline cursor-pointer select-none items-center gap-2 pl-2"> Hide emoticons in conversation topics </div> </label> <div class="mt-12 flex flex-col gap-3"> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions" target="_blank" rel="noreferrer" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-sm " /> Share your feedback on HuggingChat</a > <button on:click|preventDefault={() => (isConfirmingDeletion = true)} type="submit" class="flex items-center underline decoration-gray-300 underline-offset-2 hover:decoration-gray-700" ><CarbonTrashCan class="mr-2 inline text-sm text-red-500" />Delete all conversations</button > </div> </div> {#if isConfirmingDeletion} <Modal on:close={() => (isConfirmingDeletion = false)}> <form use:enhance={() => { dispatch("close"); }} method="post" action="{base}/conversations?/delete" class="flex w-full flex-col gap-5 p-6" > <div class="flex items-start justify-between text-xl font-semibold text-gray-800"> <h2>Are you sure?</h2> <button type="button" class="group" on:click|stopPropagation={() => (isConfirmingDeletion = false)} > <CarbonClose class="text-gray-900 group-hover:text-gray-500" /> </button> </div> <p class="text-gray-800"> This action will delete all your conversations. This cannot be undone. </p> <button type="submit" class="mt-2 rounded-full bg-red-700 px-5 py-2 text-lg font-semibold text-gray-100 ring-gray-400 ring-offset-1 transition-all focus-visible:outline-none focus-visible:ring hover:ring" > Confirm deletion </button> </form> </Modal> {/if} </div>
chat-ui/src/routes/settings/(nav)/+page.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+page.svelte", "repo_id": "chat-ui", "token_count": 1373 }
58
@import "./highlight-js.css";

@tailwind base;
@tailwind components;
@tailwind utilities;

@layer components {
	.btn {
		@apply inline-flex flex-shrink-0 cursor-pointer select-none items-center justify-center whitespace-nowrap outline-none transition-all focus:ring disabled:cursor-default;
	}
}

@layer utilities {
	.scrollbar-custom {
		@apply scrollbar-thin scrollbar-track-transparent scrollbar-thumb-black/10 scrollbar-thumb-rounded-full scrollbar-w-1 hover:scrollbar-thumb-black/20 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20;
	}
}
chat-ui/src/styles/main.css/0
{ "file_path": "chat-ui/src/styles/main.css", "repo_id": "chat-ui", "token_count": 189 }
59
{
	"extends": "./.svelte-kit/tsconfig.json",
	"compilerOptions": {
		"allowJs": true,
		"checkJs": true,
		"esModuleInterop": true,
		"forceConsistentCasingInFileNames": true,
		"resolveJsonModule": true,
		"skipLibCheck": true,
		"sourceMap": true,
		"strict": true,
		"target": "ES2018"
	}
	// Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias
	//
	// If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes
	// from the referenced tsconfig.json - TypeScript does not merge them in
}
chat-ui/tsconfig.json/0
{ "file_path": "chat-ui/tsconfig.json", "repo_id": "chat-ui", "token_count": 197 }
60
repos:
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    # https://github.com/charliermarsh/ruff#usage
    rev: 'v0.3.0'
    hooks:
      # Run the linter.
      - id: ruff
        args: [ --fix ]
      # Run the formatter.
      - id: ruff-format
datasets/.pre-commit-config.yaml/0
{ "file_path": "datasets/.pre-commit-config.yaml", "repo_id": "datasets", "token_count": 122 }
61
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
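`generate_example_dataset` and `get_duration` are imported from a local `utils` module that is not included in this row. As an illustration only (an assumption about its behavior, not the repository's actual helper), a timing decorator compatible with how `get_duration` is used above might look like this:

```python
import time
from functools import wraps


def get_duration(func):
    """Run the wrapped benchmark step and return its wall-clock duration in seconds."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper
```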
datasets/benchmarks/benchmark_map_filter.py/0
{ "file_path": "datasets/benchmarks/benchmark_map_filter.py", "repo_id": "datasets", "token_count": 996 }
62
# Build and load

Nearly every deep learning workflow begins with loading a dataset, which makes it one of the most important steps. With 🤗 Datasets, there are more than 900 datasets available to help you get started with your NLP task. All you have to do is call [`load_dataset`] to take your first step. This function is a true workhorse in every sense because it builds and loads every dataset you use.

## ELI5: `load_dataset`

Let's begin with a basic Explain Like I'm Five.

A dataset is a directory that contains:

- Some data files in generic formats (JSON, CSV, Parquet, text, etc.)
- A dataset card named `README.md` that contains documentation about the dataset as well as a YAML header to define the dataset's tags and configurations
- An optional dataset script if it requires some code to read the data files. This is sometimes used to load files of specific formats and structures.

The [`load_dataset`] function fetches the requested dataset locally or from the Hugging Face Hub. The Hub is a central repository where all the Hugging Face datasets and models are stored.

If the dataset only contains data files, then [`load_dataset`] automatically infers how to load the data files from their extensions (json, csv, parquet, txt, etc.). Under the hood, 🤗 Datasets will use an appropriate [`DatasetBuilder`] based on the data file format. There is one builder per data file format in 🤗 Datasets:

* [`datasets.packaged_modules.text.Text`] for text
* [`datasets.packaged_modules.csv.Csv`] for CSV and TSV
* [`datasets.packaged_modules.json.Json`] for JSON and JSONL
* [`datasets.packaged_modules.parquet.Parquet`] for Parquet
* [`datasets.packaged_modules.arrow.Arrow`] for Arrow (streaming file format)
* [`datasets.packaged_modules.sql.Sql`] for SQL databases
* [`datasets.packaged_modules.imagefolder.ImageFolder`] for image folders
* [`datasets.packaged_modules.audiofolder.AudioFolder`] for audio folders

If the dataset has a dataset script, then it is downloaded and imported from the Hugging Face Hub. Code in the dataset script defines a custom [`DatasetBuilder`] with the dataset information (description, features, URL to the original files, etc.), and tells 🤗 Datasets how to generate and display examples from it.

<Tip>

Read the [Share](./upload_dataset) section to learn more about how to share a dataset. This section also provides a step-by-step guide on how to write your own dataset loading script!

</Tip>

🤗 Datasets downloads the dataset files from the original URL, generates the dataset, and caches it in an Arrow table on your drive. If you've downloaded the dataset before, then 🤗 Datasets will reload it from the cache to save you the trouble of downloading it again.

Now that you have a high-level understanding of how datasets are built, let's take a closer look at the nuts and bolts of how all this works.

## Building a dataset

When you load a dataset for the first time, 🤗 Datasets takes the raw data file and builds it into a table of rows and typed columns. There are two main classes responsible for building a dataset: [`BuilderConfig`] and [`DatasetBuilder`].

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/builderconfig.png"/>
</div>

### BuilderConfig[[datasets-builderconfig]]

[`BuilderConfig`] is the configuration class of [`DatasetBuilder`].
The [`BuilderConfig`] contains the following basic attributes about a dataset:

| Attribute     | Description                                                   |
|---------------|---------------------------------------------------------------|
| `name`        | Short name of the dataset.                                    |
| `version`     | Dataset version identifier.                                   |
| `data_dir`    | Stores the path to a local folder containing the data files.  |
| `data_files`  | Stores paths to local data files.                             |
| `description` | Description of the dataset.                                   |

If you want to add additional attributes to your dataset, such as the class labels, you can subclass the base [`BuilderConfig`] class. There are two ways to populate the attributes of a [`BuilderConfig`] class or subclass:

- Provide a list of predefined [`BuilderConfig`] class (or subclass) instances in the dataset's [`DatasetBuilder.BUILDER_CONFIGS`] attribute.
- When you call [`load_dataset`], any keyword arguments that are not specific to the method will be used to set the associated attributes of the [`BuilderConfig`] class. This will override the predefined attributes if a specific configuration was selected.

You can also set the [`DatasetBuilder.BUILDER_CONFIG_CLASS`] to any custom subclass of [`BuilderConfig`].

### DatasetBuilder[[datasets-datasetbuilder]]

[`DatasetBuilder`] accesses all the attributes inside [`BuilderConfig`] to build the actual dataset.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasetbuilder.png"/>
</div>

There are three main methods in [`DatasetBuilder`]:

1. [`DatasetBuilder._info`] is in charge of defining the dataset attributes. When you call `dataset.info`, 🤗 Datasets returns the information stored here. Likewise, the [`Features`] are also specified here. Remember, the [`Features`] are like the skeleton of the dataset, providing the names and types of each column.

2. [`DatasetBuilder._split_generators`] downloads or retrieves the requested data files, organizes them into splits, and defines specific arguments for the generation process. This method has a [`DownloadManager`] that downloads files or fetches them from your local filesystem. Within the [`DownloadManager`], there is a [`DownloadManager.download_and_extract`] method that accepts a dictionary of URLs to the original data files, and downloads the requested files. Accepted inputs include: a single URL or path, or a list/dictionary of URLs or paths. Any compressed file types like TAR, GZIP and ZIP archives will be automatically extracted. Once the files are downloaded, [`SplitGenerator`] organizes them into splits. The [`SplitGenerator`] contains the name of the split, and any keyword arguments that are provided to the [`DatasetBuilder._generate_examples`] method. The keyword arguments can be specific to each split, and typically comprise at least the local path to the data files for each split.

3. [`DatasetBuilder._generate_examples`] reads and parses the data files for a split. Then it yields dataset examples according to the format specified in the `features` from [`DatasetBuilder._info`]. The input of [`DatasetBuilder._generate_examples`] is actually the `filepath` provided in the keyword arguments of the previous method. The dataset is generated with a Python generator, which doesn't load all the data in memory. As a result, the generator can handle large datasets. However, before the generated samples are flushed to the dataset file on disk, they are stored in an `ArrowWriter` buffer. This means the generated samples are written in batches.
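To make the three methods concrete, here is a minimal sketch of a [`GeneratorBasedBuilder`] subclass. It is an illustration only, not a builder from the library: the configuration class, the URL, and the field names are hypothetical.

```py
import json

import datasets


class MyDatasetConfig(datasets.BuilderConfig):
    """Hypothetical BuilderConfig subclass carrying an extra `url` attribute."""

    def __init__(self, url=None, **kwargs):
        super().__init__(**kwargs)
        self.url = url


class MyDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = MyDatasetConfig
    BUILDER_CONFIGS = [MyDatasetConfig(name="default", url="https://example.com/data.jsonl.gz")]
    # Keep the ArrowWriter buffer small if individual samples are large (e.g. images or videos).
    DEFAULT_WRITER_BATCH_SIZE = 100

    def _info(self):
        # Dataset attributes returned by `dataset.info`, including the Features skeleton.
        return datasets.DatasetInfo(
            description="A toy dataset used to illustrate the builder methods.",
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=["neg", "pos"])}
            ),
        )

    def _split_generators(self, dl_manager):
        # Download (and decompress) the data file, then pass its local path to _generate_examples.
        data_path = dl_manager.download_and_extract(self.config.url)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_path})]

    def _generate_examples(self, filepath):
        # Yield (key, example) pairs that match the Features declared in _info.
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                record = json.loads(line)
                yield idx, {"text": record["text"], "label": record["label"]}
```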
If your dataset samples consume a lot of memory (images or videos), then make sure to specify a low value for the `DEFAULT_WRITER_BATCH_SIZE` attribute in [`DatasetBuilder`]. We recommend not exceeding a size of 200 MB.

## Maintaining integrity

To ensure a dataset is complete, [`load_dataset`] will perform a series of tests on the downloaded files to make sure everything is there. This way, you don't encounter any surprises when your requested dataset doesn't get generated as expected. [`load_dataset`] verifies:

- The number of splits in the generated `DatasetDict`.
- The number of samples in each split of the generated `DatasetDict`.
- The list of downloaded files.
- The SHA256 checksums of the downloaded files (disabled by default).

If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files.

<Tip>

If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata.

</Tip>

In this case, an error is raised to alert you that the dataset has changed. To ignore the error, specify `verification_mode="no_checks"` in [`load_dataset`]. Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated.

## Security

The dataset repositories on the Hub are scanned for malware; see more information [here](https://huggingface.co/docs/hub/security#malware-scanning).

Moreover, the datasets without a namespace (originally contributed on our GitHub repository) have all been reviewed by our maintainers. The code of these datasets is considered **safe**. It concerns datasets that are not under a namespace, e.g. "squad" or "glue", unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
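As a quick illustration of the verification behavior described above, here is a minimal sketch (the `username/my_dataset` repository name is a placeholder, and the `verification_mode` argument assumes a recent version of 🤗 Datasets):

```py
from datasets import load_dataset

# Normal path: split names, sample counts, and file lists recorded in the dataset
# metadata are verified automatically while the dataset is generated.
dataset = load_dataset("rotten_tomatoes", split="train")

# If the recorded metadata is stale and verification fails, the checks can be skipped.
dataset = load_dataset("username/my_dataset", verification_mode="no_checks")
```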
datasets/docs/source/about_dataset_load.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_load.mdx", "repo_id": "datasets", "token_count": 2537 }
63
# Overview The how-to guides offer a more comprehensive overview of all the tools 🤗 Datasets offers and how to use them. This will help you tackle messier real-world datasets where you may need to manipulate the dataset structure or content to get it ready for training. The guides assume you are familiar and comfortable with the 🤗 Datasets basics. We recommend newer users check out our [tutorials](tutorial) first. <Tip> Interested in learning more? Take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course! </Tip> The guides are organized into six sections: - <span class="underline decoration-sky-400 decoration-2 font-semibold">General usage</span>: Functions for general dataset loading and processing. The functions shown in this section are applicable across all dataset modalities. - <span class="underline decoration-pink-400 decoration-2 font-semibold">Audio</span>: How to load, process, and share audio datasets. - <span class="underline decoration-yellow-400 decoration-2 font-semibold">Vision</span>: How to load, process, and share image datasets. - <span class="underline decoration-green-400 decoration-2 font-semibold">Text</span>: How to load, process, and share text datasets. - <span class="underline decoration-orange-400 decoration-2 font-semibold">Tabular</span>: How to load, process, and share tabular datasets. - <span class="underline decoration-indigo-400 decoration-2 font-semibold">Dataset repository</span>: How to share and upload a dataset to the <a href="https://huggingface.co/datasets">Hub</a>. If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
datasets/docs/source/how_to.md/0
{ "file_path": "datasets/docs/source/how_to.md", "repo_id": "datasets", "token_count": 469 }
64
# Builder classes ## Builders 🤗 Datasets relies on two main classes during the dataset building process: [`DatasetBuilder`] and [`BuilderConfig`]. [[autodoc]] datasets.DatasetBuilder [[autodoc]] datasets.GeneratorBasedBuilder [[autodoc]] datasets.BeamBasedBuilder [[autodoc]] datasets.ArrowBasedBuilder [[autodoc]] datasets.BuilderConfig ## Download [[autodoc]] datasets.DownloadManager [[autodoc]] datasets.StreamingDownloadManager [[autodoc]] datasets.DownloadConfig [[autodoc]] datasets.DownloadMode ## Verification [[autodoc]] datasets.VerificationMode ## Splits [[autodoc]] datasets.SplitGenerator [[autodoc]] datasets.Split [[autodoc]] datasets.NamedSplit [[autodoc]] datasets.NamedSplitAll [[autodoc]] datasets.ReadInstruction ## Version [[autodoc]] datasets.utils.Version
datasets/docs/source/package_reference/builder_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/builder_classes.mdx", "repo_id": "datasets", "token_count": 253 }
65
# Preprocess In addition to loading datasets, 🤗 Datasets other main goal is to offer a diverse set of preprocessing functions to get a dataset into an appropriate format for training with your machine learning framework. There are many possible ways to preprocess a dataset, and it all depends on your specific dataset. Sometimes you may need to rename a column, and other times you might need to unflatten nested fields. 🤗 Datasets provides a way to do most of these things. But in nearly all preprocessing cases, depending on your dataset modality, you'll need to: - Tokenize a text dataset. - Resample an audio dataset. - Apply transforms to an image dataset. The last preprocessing step is usually setting your dataset format to be compatible with your machine learning framework's expected input format. In this tutorial, you'll also need to install the 🤗 Transformers library: ```bash pip install transformers ``` Grab a dataset of your choice and follow along! ## Tokenize text Models cannot process raw text, so you'll need to convert the text into numbers. Tokenization provides a way to do this by dividing text into individual words called *tokens*. Tokens are finally converted to numbers. <Tip> Check out the [Tokenizers](https://huggingface.co/course/chapter2/4?fw=pt) section in Chapter 2 of the Hugging Face course to learn more about tokenization and different tokenization algorithms. </Tip> **1**. Start by loading the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset and the tokenizer corresponding to a pretrained [BERT](https://huggingface.co/bert-base-uncased) model. Using the same tokenizer as the pretrained model is important because you want to make sure the text is split in the same way. ```py >>> from transformers import AutoTokenizer >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") >>> dataset = load_dataset("rotten_tomatoes", split="train") ``` **2**. Call your tokenizer on the first row of `text` in the dataset: ```py >>> tokenizer(dataset[0]["text"]) {'input_ids': [101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432, 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119, 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190, 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117, 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137, 188, 1566, 7912, 14516, 6997, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The tokenizer returns a dictionary with three items: - `input_ids`: the numbers representing the tokens in the text. - `token_type_ids`: indicates which sequence a token belongs to if there is more than one sequence. - `attention_mask`: indicates whether a token should be masked or not. These values are actually the model inputs. **3**. The fastest way to tokenize your entire dataset is to use the [`~Dataset.map`] function. This function speeds up tokenization by applying the tokenizer to batches of examples instead of individual examples. Set the `batched` parameter to `True`: ```py >>> def tokenization(example): ... return tokenizer(example["text"]) >>> dataset = dataset.map(tokenization, batched=True) ``` **4**. 
Set the format of your dataset to be compatible with your machine learning framework: <frameworkcontent> <pt> Use the [`~Dataset.set_format`] function to set the dataset format to be compatible with PyTorch: ```py >>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) >>> dataset.format['type'] 'torch' ``` </pt> <tf> Use the [`~Dataset.to_tf_dataset`] function to set the dataset format to be compatible with TensorFlow. You'll also need to import a [data collator](https://huggingface.co/docs/transformers/main_classes/data_collator#transformers.DataCollatorWithPadding) from 🤗 Transformers to combine the varying sequence lengths into a single batch of equal lengths: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf") >>> tf_dataset = dataset.to_tf_dataset( ... columns=["input_ids", "token_type_ids", "attention_mask"], ... label_cols=["label"], ... batch_size=2, ... collate_fn=data_collator, ... shuffle=True ... ) ``` </tf> </frameworkcontent> **5**. The dataset is now ready for training with your machine learning framework! ## Resample audio signals Audio inputs like text datasets need to be divided into discrete data points. This is known as *sampling*; the sampling rate tells you how much of the speech signal is captured per second. It is important to make sure the sampling rate of your dataset matches the sampling rate of the data used to pretrain the model you're using. If the sampling rates are different, the pretrained model may perform poorly on your dataset because it doesn't recognize the differences in the sampling rate. **1**. Start by loading the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset, the [`Audio`] feature, and the feature extractor corresponding to a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h) model: ```py >>> from transformers import AutoFeatureExtractor >>> from datasets import load_dataset, Audio >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") ``` **2**. Index into the first row of the dataset. When you call the `audio` column of the dataset, it is automatically decoded and resampled: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` **3**. Reading a dataset card is incredibly useful and can give you a lot of information about the dataset. A quick look at the MInDS-14 dataset card tells you the sampling rate is 8kHz. Likewise, you can get many details about a model from its model card. The Wav2Vec2 model card says it was sampled on 16kHz speech audio. This means you'll need to upsample the MInDS-14 dataset to match the sampling rate of the model. Use the [`~Dataset.cast_column`] function and set the `sampling_rate` parameter in the [`Audio`] feature to upsample the audio signal. 
When you call the `audio` column now, it is decoded and resampled to 16kHz: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` **4**. Use the [`~Dataset.map`] function to resample the entire dataset to 16kHz. This function speeds up resampling by applying the feature extractor to batches of examples instead of individual examples. Set the `batched` parameter to `True`: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ... ) ... return inputs >>> dataset = dataset.map(preprocess_function, batched=True) ``` **5**. The dataset is now ready for training with your machine learning framework! ## Apply data augmentations The most common preprocessing you'll do with image datasets is *data augmentation*, a process that introduces random variations to an image without changing the meaning of the data. This can mean changing the color properties of an image or randomly cropping an image. You are free to use any data augmentation library you like, and 🤗 Datasets will help you apply your data augmentations to your dataset. **1**. Start by loading the [Beans](https://huggingface.co/datasets/beans) dataset, the `Image` feature, and the feature extractor corresponding to a pretrained [ViT](https://huggingface.co/google/vit-base-patch16-224-in21k) model: ```py >>> from transformers import AutoFeatureExtractor >>> from datasets import load_dataset, Image >>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") >>> dataset = load_dataset("beans", split="train") ``` **2**. Index into the first row of the dataset. When you call the `image` column of the dataset, the underlying PIL object is automatically decoded into an image. ```py >>> dataset[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x7FE5A047CC70> ``` Most image models expect the image to be in the RGB mode. The Beans images are already in the RGB mode, but if your dataset contains images in a different mode, you can use the [`~Dataset.cast_column`] function to set the mode to RGB: ```py >>> dataset = dataset.cast_column("image", Image(mode="RGB")) ``` **3**. Now, you can apply some transforms to the image. Feel free to take a look at the [various transforms available](https://pytorch.org/vision/stable/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py) in torchvision and choose one you'd like to experiment with. This example applies a transform that randomly rotates the image: ```py >>> from torchvision.transforms import RandomRotation >>> rotate = RandomRotation(degrees=(0, 90)) >>> def transforms(examples): ... examples["pixel_values"] = [rotate(image) for image in examples["image"]] ... return examples ``` **4**. Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly. When you index into the image `pixel_values`, the transform is applied, and your image gets rotated. ```py >>> dataset.set_transform(transforms) >>> dataset[0]["pixel_values"] ``` **5**. 
The dataset is now ready for training with your machine learning framework!
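One possible next step, sketched under the assumption that the objects created in the steps above (`dataset`, `rotate`, `feature_extractor`) are still in scope and that you train with PyTorch: extend the on-the-fly transform so each batch is model-ready, then iterate with a `DataLoader` (the printed shape assumes the default 224x224 ViT feature extractor and the Beans `labels` column).

```py
>>> import torch
>>> from torch.utils.data import DataLoader

>>> def transforms(examples):
...     # Rotate the images, then let the feature extractor turn them into pixel_values tensors.
...     images = [rotate(image.convert("RGB")) for image in examples["image"]]
...     inputs = feature_extractor(images, return_tensors="pt")
...     inputs["labels"] = torch.tensor(examples["labels"])
...     return inputs

>>> dataset.set_transform(transforms)
>>> dataloader = DataLoader(dataset, batch_size=4)
>>> batch = next(iter(dataloader))
>>> batch["pixel_values"].shape
torch.Size([4, 3, 224, 224])
```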
datasets/docs/source/use_dataset.mdx/0
{ "file_path": "datasets/docs/source/use_dataset.mdx", "repo_id": "datasets", "token_count": 3367 }
66
# Metric Card for chrF(++) ## Metric Description ChrF and ChrF++ are two MT evaluation metrics that use the F-score statistic for character n-gram matches. ChrF++ additionally includes word n-grams, which correlate more strongly with direct assessment. We use the implementation that is already present in sacrebleu. While this metric is included in sacreBLEU, the implementation here is slightly different from sacreBLEU in terms of the required input format. Here, the length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the [sacreBLEU README.md](https://github.com/mjpost/sacreBLEU#chrf--chrf) for more information. ## How to Use At minimum, this metric requires a `list` of predictions and a `list` of `list`s of references: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} ``` ### Inputs - **`predictions`** (`list` of `str`): The predicted sentences. - **`references`** (`list` of `list` of `str`): The references. There should be one reference sub-list for each prediction sentence. - **`char_order`** (`int`): Character n-gram order. Defaults to `6`. - **`word_order`** (`int`): Word n-gram order. If equals to 2, the metric is referred to as chrF++. Defaults to `0`. - **`beta`** (`int`): Determine the importance of recall w.r.t precision. Defaults to `2`. - **`lowercase`** (`bool`): If `True`, enables case-insensitivity. Defaults to `False`. - **`whitespace`** (`bool`): If `True`, include whitespaces when extracting character n-grams. Defaults to `False`. - **`eps_smoothing`** (`bool`): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK, and Moses implementations. If `False`, takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. ### Output Values The output is a dictionary containing the following fields: - **`'score'`** (`float`): The chrF (chrF++) score. - **`'char_order'`** (`int`): The character n-gram order. - **`'word_order'`** (`int`): The word n-gram order. If equals to `2`, the metric is referred to as chrF++. - **`'beta'`** (`int`): Determine the importance of recall w.r.t precision. The output is formatted as below: ```python {'score': 61.576379378113785, 'char_order': 6, 'word_order': 0, 'beta': 2} ``` The chrF(++) score can be any value between `0.0` and `100.0`, inclusive. 
#### Values from Popular Papers ### Examples A simple example of calculating chrF: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} ``` The same example, but with the argument `word_order=2`, to calculate chrF++ instead of chrF: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} ``` The same chrF++ example as above, but with `lowercase=True` to normalize all case: ```python >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} ``` ## Limitations and Bias - According to [Popović 2017](https://www.statmt.org/wmt17/pdf/WMT70.pdf), chrF+ (where `word_order=1`) and chrF++ (where `word_order=2`) produce scores that correlate better with human judgements than chrF (where `word_order=0`) does. 
## Citation ```bibtex @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ``` ## Further References - See the [sacreBLEU README.md](https://github.com/mjpost/sacreBLEU#chrf--chrf) for more information on this implementation.
datasets/metrics/chrf/README.md/0
{ "file_path": "datasets/metrics/chrf/README.md", "repo_id": "datasets", "token_count": 2254 }
67
# Metric Card for F1 ## Metric Description The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ## How to Use At minimum, this metric requires predictions and references as input: ```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(predictions=[0, 1], references=[0, 1]) >>> print(results) {'f1': 1.0} ``` ### Inputs - **predictions** (`list` of `int`): Predicted labels. - **references** (`list` of `int`): Ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. - **pos_label** (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights. Defaults to None. ### Output Values - **f1** (`float` or `array` of `float`): F1 score or list of F1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher F1 scores are better. Output Example(s): ```python {'f1': 0.26666666666666666} ``` ```python {'f1': array([0.8, 0.0, 0.0])} ``` This metric outputs a dictionary, with either a single F1 score, of type `float`, or an array of F1 scores, with entries of type `float`. #### Values from Popular Papers ### Examples Example 1-A simple binary example ```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} ``` Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. ```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 ``` Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. 
```python >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 ``` Example 4-A multiclass example, with different values for the `average` input. ```python >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} ``` ## Limitations and Bias ## Citation(s) ```bibtex @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ``` ## Further References
datasets/metrics/f1/README.md/0
{ "file_path": "datasets/metrics/f1/README.md", "repo_id": "datasets", "token_count": 1624 }
68
# Metric Card for MAUVE ## Metric description MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. It summarizes both Type I and Type II errors measured softly using [Kullback–Leibler (KL) divergences](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence). This metric is a wrapper around the [official implementation](https://github.com/krishnap25/mauve) of MAUVE. For more details, consult the [MAUVE paper](https://arxiv.org/abs/2102.01454). ## How to use The metric takes two lists of strings of tokens separated by spaces: one representing `predictions` (i.e. the text generated by the model) and the second representing `references` (a reference text for each prediction): ```python from datasets import load_metric mauve = load_metric('mauve') predictions = ["hello world", "goodnight moon"] references = ["hello world", "goodnight moon"] mauve_results = mauve.compute(predictions=predictions, references=references) ``` It also has several optional arguments: `num_buckets`: the size of the histogram to quantize P and Q. Options: `auto` (default) or an integer. `pca_max_data`: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. The default is `-1`. `kmeans_explained_var`: amount of variance of the data to keep in dimensionality reduction by PCA. The default is `0.9`. `kmeans_num_redo`: number of times to redo k-means clustering (the best objective is kept). The default is `5`. `kmeans_max_iter`: maximum number of k-means iterations. The default is `500`. `featurize_model_name`: name of the model from which features are obtained, from one of the following: `gpt2`, `gpt2-medium`, `gpt2-large`, `gpt2-xl`. The default is `gpt2-large`. `device_id`: Device for featurization. Supply a GPU id (e.g. `0` or `3`) to use GPU. If no GPU with this id is found, the metric will use CPU. `max_text_length`: maximum number of tokens to consider. The default is `1024`. `divergence_curve_discretization_size` Number of points to consider on the divergence curve. The default is `25`. `mauve_scaling_factor`: Hyperparameter for scaling. The default is `5`. `verbose`: If `True` (default), running the metric will print running time updates. `seed`: random seed to initialize k-means cluster assignments, randomly assigned by default. ## Output values This metric outputs a dictionary with 5 key-value pairs: `mauve`: MAUVE score, which ranges between 0 and 1. **Larger** values indicate that P and Q are closer. `frontier_integral`: Frontier Integral, which ranges between 0 and 1. **Smaller** values indicate that P and Q are closer. `divergence_curve`: a numpy.ndarray of shape (m, 2); plot it with `matplotlib` to view the divergence curve. `p_hist`: a discrete distribution, which is a quantized version of the text distribution `p_text`. `q_hist`: same as above, but with `q_text`. ### Values from popular papers The [original MAUVE paper](https://arxiv.org/abs/2102.01454) reported values ranging from 0.88 to 0.94 for open-ended text generation using a text completion task in the web text domain. The authors found that bigger models resulted in higher MAUVE scores, and that MAUVE is correlated with human judgments. 
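As a quick, hedged sketch of how some of these optional arguments can be passed (the sentences are made up for illustration, and the smaller `gpt2` featurizer plus a fixed seed are chosen only to keep the run light, so the score will differ from one computed with the default `gpt2-large`):

```python
from datasets import load_metric

mauve = load_metric('mauve')
predictions = ["the cat sat on the mat", "a quick brown fox jumps over the lazy dog"]
references = ["the cat lay on the rug", "a quick brown fox jumps over the lazy dog"]

# device_id=0 uses the first GPU if one is found; otherwise the metric falls back to CPU.
mauve_results = mauve.compute(
    predictions=predictions,
    references=references,
    featurize_model_name="gpt2",
    device_id=0,
    max_text_length=256,
    seed=25,
    verbose=False,
)
print(mauve_results.mauve)
```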
## Examples Perfect match between prediction and reference: ```python from datasets import load_metric mauve = load_metric('mauve') predictions = ["hello world", "goodnight moon"] references = ["hello world", "goodnight moon"] mauve_results = mauve.compute(predictions=predictions, references=references) print(mauve_results.mauve) 1.0 ``` Partial match between prediction and reference: ```python from datasets import load_metric mauve = load_metric('mauve') predictions = ["hello world", "goodnight moon"] references = ["hello there", "general kenobi"] mauve_results = mauve.compute(predictions=predictions, references=references) print(mauve_results.mauve) 0.27811372536724027 ``` ## Limitations and bias The [original MAUVE paper](https://arxiv.org/abs/2102.01454) did not analyze the inductive biases present in different embedding models, but related work has shown different kinds of biases exist in many popular generative language models including GPT-2 (see [Kirk et al., 2021](https://arxiv.org/pdf/2102.04130.pdf), [Abid et al., 2021](https://arxiv.org/abs/2101.05783)). The extent to which these biases can impact the MAUVE score has not been quantified. Also, calculating the MAUVE metric involves downloading the model from which features are obtained -- the default model, `gpt2-large`, takes over 3GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance, `gpt2` is around 500MB. ## Citation ```bibtex @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ``` ## Further References - [Official MAUVE implementation](https://github.com/krishnap25/mauve) - [Hugging Face Tasks - Text Generation](https://huggingface.co/tasks/text-generation)
datasets/metrics/mauve/README.md/0
{ "file_path": "datasets/metrics/mauve/README.md", "repo_id": "datasets", "token_count": 1650 }
69
# Metric Card for ROC AUC ## Metric Description This metric computes the area under the curve (AUC) for the Receiver Operating Characteristic Curve (ROC). The return values represent how well the model used is predicting the correct classes, based on the input data. A score of `0.5` means that the model is predicting exactly at chance, i.e. the model's predictions are correct at the same rate as if the predictions were being decided by the flip of a fair coin or the roll of a fair die. A score above `0.5` indicates that the model is doing better than chance, while a score below `0.5` indicates that the model is doing worse than chance. This metric has three separate use cases: - **binary**: The case in which there are only two different label classes, and each example gets only one label. This is the default implementation. - **multiclass**: The case in which there can be more than two different label classes, but each example still gets only one label. - **multilabel**: The case in which there can be more than two different label classes, and each example can have more than one label. ## How to Use At minimum, this metric requires references and prediction scores: ```python >>> roc_auc_score = datasets.load_metric("roc_auc") >>> refs = [1, 0, 1, 1, 0, 0] >>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7] >>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores) >>> print(round(results['roc_auc'], 2)) 0.78 ``` The default implementation of this metric is the **binary** implementation. If employing the **multiclass** or **multilabel** use cases, the keyword `"multiclass"` or `"multilabel"` must be specified when loading the metric: - In the **multiclass** case, the metric is loaded with: ```python >>> roc_auc_score = datasets.load_metric("roc_auc", "multiclass") ``` - In the **multilabel** case, the metric is loaded with: ```python >>> roc_auc_score = datasets.load_metric("roc_auc", "multilabel") ``` See the [Examples Section Below](#examples_section) for more extensive examples. ### Inputs - **`references`** (array-like of shape (n_samples,) or (n_samples, n_classes)): Ground truth labels. Expects different inputs based on use case: - binary: expects an array-like of shape (n_samples,) - multiclass: expects an array-like of shape (n_samples,) - multilabel: expects an array-like of shape (n_samples, n_classes) - **`prediction_scores`** (array-like of shape (n_samples,) or (n_samples, n_classes)): Model predictions. Expects different inputs based on use case: - binary: expects an array-like of shape (n_samples,) - multiclass: expects an array-like of shape (n_samples, n_classes). The probability estimates must sum to 1 across the possible classes. - multilabel: expects an array-like of shape (n_samples, n_classes) - **`average`** (`str`): Type of average, and is ignored in the binary use case. Defaults to `'macro'`. Options are: - `'micro'`: Calculates metrics globally by considering each element of the label indicator matrix as a label. Only works with the multilabel use case. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average, weighted by support (i.e. the number of true instances for each label). - `'samples'`: Calculate metrics for each instance, and find their average. Only works with the multilabel use case. - `None`: No average is calculated, and scores for each class are returned. 
Only works with the multilabels use case. - **`sample_weight`** (array-like of shape (n_samples,)): Sample weights. Defaults to None. - **`max_fpr`** (`float`): If not None, the standardized partial AUC over the range [0, `max_fpr`] is returned. Must be greater than `0` and less than or equal to `1`. Defaults to `None`. Note: For the multiclass use case, `max_fpr` should be either `None` or `1.0` as ROC AUC partial computation is not currently supported for `multiclass`. - **`multi_class`** (`str`): Only used for multiclass targets, in which case it is required. Determines the type of configuration to use. Options are: - `'ovr'`: Stands for One-vs-rest. Computes the AUC of each class against the rest. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when `average == 'macro'`, because class imbalance affects the composition of each of the 'rest' groupings. - `'ovo'`: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes. Insensitive to class imbalance when `average == 'macro'`. - **`labels`** (array-like of shape (n_classes,)): Only used for multiclass targets. List of labels that index the classes in `prediction_scores`. If `None`, the numerical or lexicographical order of the labels in `prediction_scores` is used. Defaults to `None`. ### Output Values This metric returns a dict containing the `roc_auc` score. The score is a `float`, unless it is the multilabel case with `average=None`, in which case the score is a numpy `array` with entries of type `float`. The output therefore generally takes the following format: ```python {'roc_auc': 0.778} ``` In contrast, though, the output takes the following format in the multilabel case when `average=None`: ```python {'roc_auc': array([0.83333333, 0.375, 0.94444444])} ``` ROC AUC scores can take on any value between `0` and `1`, inclusive. #### Values from Popular Papers ### <a name="examples_section"></a>Examples Example 1, the **binary** use case: ```python >>> roc_auc_score = datasets.load_metric("roc_auc") >>> refs = [1, 0, 1, 1, 0, 0] >>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7] >>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores) >>> print(round(results['roc_auc'], 2)) 0.78 ``` Example 2, the **multiclass** use case: ```python >>> roc_auc_score = datasets.load_metric("roc_auc", "multiclass") >>> refs = [1, 0, 1, 2, 2, 0] >>> pred_scores = [[0.3, 0.5, 0.2], ... [0.7, 0.2, 0.1], ... [0.005, 0.99, 0.005], ... [0.2, 0.3, 0.5], ... [0.1, 0.1, 0.8], ... [0.1, 0.7, 0.2]] >>> results = roc_auc_score.compute(references=refs, ... prediction_scores=pred_scores, ... multi_class='ovr') >>> print(round(results['roc_auc'], 2)) 0.85 ``` Example 3, the **multilabel** use case: ```python >>> roc_auc_score = datasets.load_metric("roc_auc", "multilabel") >>> refs = [[1, 1, 0], ... [1, 1, 0], ... [0, 1, 0], ... [0, 0, 1], ... [0, 1, 1], ... [1, 0, 1]] >>> pred_scores = [[0.3, 0.5, 0.2], ... [0.7, 0.2, 0.1], ... [0.005, 0.99, 0.005], ... [0.2, 0.3, 0.5], ... [0.1, 0.1, 0.8], ... [0.1, 0.7, 0.2]] >>> results = roc_auc_score.compute(references=refs, ... prediction_scores=pred_scores, ... 
average=None) >>> print([round(res, 2) for res in results['roc_auc']]) [0.83, 0.38, 0.94] ``` ## Limitations and Bias ## Citation ```bibtex @article{doi:10.1177/0272989X8900900307, author = {Donna Katzman McClish}, title = {Analyzing a Portion of the ROC Curve}, journal = {Medical Decision Making}, volume = {9}, number = {3}, pages = {190-195}, year = {1989}, doi = {10.1177/0272989X8900900307}, note = {PMID: 2668680}, URL = {https://doi.org/10.1177/0272989X8900900307}, eprint = {https://doi.org/10.1177/0272989X8900900307} } ``` ```bibtex @article{10.1023/A:1010920819831, author = {Hand, David J. and Till, Robert J.}, title = {A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems}, year = {2001}, issue_date = {November 2001}, publisher = {Kluwer Academic Publishers}, address = {USA}, volume = {45}, number = {2}, issn = {0885-6125}, url = {https://doi.org/10.1023/A:1010920819831}, doi = {10.1023/A:1010920819831}, journal = {Mach. Learn.}, month = {oct}, pages = {171–186}, numpages = {16}, keywords = {Gini index, AUC, error rate, ROC curve, receiver operating characteristic} } ``` ```bibtex @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ``` ## Further References This implementation is a wrapper around the [Scikit-learn implementation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html). Much of the documentation here was adapted from their existing documentation, as well. The [Guide to ROC and AUC](https://youtu.be/iCZJfO-7C5Q) video from the channel Data Science Bits is also very informative.
datasets/metrics/roc_auc/README.md/0
{ "file_path": "datasets/metrics/roc_auc/README.md", "repo_id": "datasets", "token_count": 3273 }
70
"""Official evaluation script for SQuAD version 2.0. In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import argparse import collections import json import os import re import string import sys import numpy as np ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE) OPTS = None def parse_args(): parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.") parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.") parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.") parser.add_argument( "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', ) parser.add_argument( "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory." ) parser.add_argument("--verbose", "-v", action="store_true") if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def make_qid_to_has_ans(dataset): qid_to_has_ans = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"]) return qid_to_has_ans def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return ARTICLES_REGEX.sub(" ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(dataset, preds): exact_scores = {} f1_scores = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: qid = qa["id"] gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [""] if qid not in preds: print(f"Missing prediction for {qid}") continue a_pred = preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = 
na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values()) / total), ("f1", 100.0 * sum(f1_scores.values()) / total), ("total", total), ] ) else: total = len(qid_list) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), ("total", total), ] ) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval[f"{prefix}_{k}"] = new_eval[k] def plot_pr_curve(precisions, recalls, out_image, title): plt.step(recalls, precisions, color="b", alpha=0.2, where="post") plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b") plt.xlabel("Recall") plt.ylabel("Precision") plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(title) plt.savefig(out_image) plt.clf() def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None): qid_list = sorted(na_probs, key=lambda k: na_probs[k]) true_pos = 0.0 cur_p = 1.0 cur_r = 0.0 precisions = [1.0] recalls = [0.0] avg_prec = 0.0 for i, qid in enumerate(qid_list): if qid_to_has_ans[qid]: true_pos += scores[qid] cur_p = true_pos / float(i + 1) cur_r = true_pos / float(num_true_pos) if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(cur_p) recalls.append(cur_r) if out_image: plot_pr_curve(precisions, recalls, out_image, title) return {"ap": 100.0 * avg_prec} def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): if out_image_dir and not os.path.exists(out_image_dir): os.makedirs(out_image_dir) num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return pr_exact = make_precision_recall_eval( exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", ) pr_f1 = make_precision_recall_eval( f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", ) oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} pr_oracle = make_precision_recall_eval( oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)", ) merge_eval(main_eval, pr_exact, "pr_exact") merge_eval(main_eval, pr_f1, "pr_f1") merge_eval(main_eval, pr_oracle, "pr_oracle") def histogram_na_prob(na_probs, qid_list, image_dir, name): if not qid_list: return x = [na_probs[k] for k in qid_list] weights = np.ones_like(x) / float(len(x)) plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) plt.xlabel("Model probability of no-answer") plt.ylabel("Proportion of dataset") plt.title(f"Histogram of no-answer probability: {name}") plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png")) plt.clf() def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh def main(): with open(OPTS.data_file) as f: dataset_json = json.load(f) dataset = dataset_json["data"] with open(OPTS.pred_file) as f: preds = json.load(f) if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: na_probs = json.load(f) else: na_probs = {k: 0.0 for k in preds} qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(dataset, preds) exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) out_eval = make_eval_dict(exact_thresh, f1_thresh) if has_ans_qids: has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) merge_eval(out_eval, has_ans_eval, "HasAns") if no_ans_qids: no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) merge_eval(out_eval, no_ans_eval, "NoAns") if OPTS.na_prob_file: find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir) histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns") histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns") if OPTS.out_file: with open(OPTS.out_file, "w") as f: json.dump(out_eval, f) else: print(json.dumps(out_eval, indent=2)) if __name__ == "__main__": OPTS = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
datasets/metrics/squad_v2/evaluate.py/0
{ "file_path": "datasets/metrics/squad_v2/evaluate.py", "repo_id": "datasets", "token_count": 5444 }
71
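The evaluation script above is a command-line tool; a hedged sketch of how it is typically invoked and how its scoring helpers can be reused follows. The file names (`dev-v2.0.json`, `predictions.json`, `na_prob.json`) are placeholders, and the import assumes the script is reachable on the path as a module named `evaluate`.

```python
# Hypothetical CLI invocation of the SQuAD v2.0 evaluation script shown above:
#
#   python evaluate.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json
#
# The scoring helpers can also be reused on individual answers (assuming the
# script is importable as a module named `evaluate`):
from evaluate import compute_exact, compute_f1

gold, pred = "Denver Broncos", "the Denver Broncos of the NFL"
print(compute_exact(gold, pred))  # 0: strings still differ after normalization
print(compute_f1(gold, pred))     # ~0.67: token-level precision/recall overlap
```

Note that the no-answer threshold logic (`apply_no_ans_threshold`, `find_all_best_thresh`) and the precision-recall plots only apply when `--na-prob-file` is provided.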
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# 🤗 Datasets Notebooks

You can find here a list of the official notebooks provided by Hugging Face.

Also, we would like to list here interesting content created by the community.
If you wrote some notebook(s) leveraging 🤗 Datasets and would like it to be listed here, please open a Pull Request so it can be included under the Community notebooks.

## Hugging Face's notebooks 🤗

### Documentation notebooks

You can open any page of the documentation as a notebook in Colab (there is a button directly on said pages) but they are also listed here if you need them:

| Notebook | Description | | |
|:----------|:-------------|:-------------|------:|
| [Quickstart](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb) | A quick presentation on integrating Datasets into a model training workflow |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)|
datasets/notebooks/README.md/0
{ "file_path": "datasets/notebooks/README.md", "repo_id": "datasets", "token_count": 534 }
72
import importlib import importlib.metadata import logging import os import platform from pathlib import Path from typing import Optional from packaging import version logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging # Datasets S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets" CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets" REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}" # Metrics S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics" CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric" REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}" # Hub HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}" HUB_DEFAULT_VERSION = "main" PY_VERSION = version.parse(platform.python_version()) # General environment variables accepted values for booleans ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"}) # Imports DILL_VERSION = version.parse(importlib.metadata.version("dill")) FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec")) PANDAS_VERSION = version.parse(importlib.metadata.version("pandas")) PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow")) HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub")) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_JAX", "AUTO").upper() TORCH_VERSION = "N/A" TORCH_AVAILABLE = False if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None if TORCH_AVAILABLE: try: TORCH_VERSION = version.parse(importlib.metadata.version("torch")) logger.info(f"PyTorch version {TORCH_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling PyTorch because USE_TF is set") POLARS_VERSION = "N/A" POLARS_AVAILABLE = importlib.util.find_spec("polars") is not None if POLARS_AVAILABLE: try: POLARS_VERSION = version.parse(importlib.metadata.version("polars")) logger.info(f"Polars version {POLARS_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass TF_VERSION = "N/A" TF_AVAILABLE = False if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None if TF_AVAILABLE: # For the metadata, we have to look for both tensorflow and tensorflow-cpu for package in [ "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "intel-tensorflow", "tensorflow-rocm", "tensorflow-macos", ]: try: TF_VERSION = version.parse(importlib.metadata.version(package)) except importlib.metadata.PackageNotFoundError: continue else: break else: TF_AVAILABLE = False if TF_AVAILABLE: if TF_VERSION.major < 2: logger.info(f"TensorFlow found but with version {TF_VERSION}. 
`datasets` requires version 2 minimum.") TF_AVAILABLE = False else: logger.info(f"TensorFlow version {TF_VERSION} available.") else: logger.info("Disabling Tensorflow because USE_TORCH is set") JAX_VERSION = "N/A" JAX_AVAILABLE = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None if JAX_AVAILABLE: try: JAX_VERSION = version.parse(importlib.metadata.version("jax")) logger.info(f"JAX version {JAX_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling JAX because USE_JAX is set to False") USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper() BEAM_VERSION = "N/A" BEAM_AVAILABLE = False if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES: try: BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam")) BEAM_AVAILABLE = True logger.info(f"Apache Beam version {BEAM_VERSION} available.") except importlib.metadata.PackageNotFoundError: pass else: logger.info("Disabling Apache Beam because USE_BEAM is set to False") # Optional tools for data loading SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None # Optional tools for feature decoding PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( importlib.import_module("soundfile").__libsndfile_version__ ) >= version.parse("1.0.31") IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( importlib.import_module("soundfile").__libsndfile_version__ ) >= version.parse("1.1.0") # Optional compression tools RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None # Cache location DEFAULT_XDG_CACHE_HOME = "~/.cache" XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME) DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface") HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME)) DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets") HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE)) DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics") HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE)) DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules") HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE)) DOWNLOADED_DATASETS_DIR = "downloads" DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR) DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH)) EXTRACTED_DATASETS_DIR = "extracted" DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR) EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH)) # Download count for the website HF_UPDATE_DOWNLOAD_COUNTS = ( os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES ) # Remote dataset scripts support __HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1") HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = ( True if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in 
ENV_VARS_TRUE_VALUES else False if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES else None ) TIME_OUT_REMOTE_CODE = 15 # Datasets-server USE_PARQUET_EXPORT = True # Batch size constants. For more info, see: # https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations) DEFAULT_MAX_BATCH_SIZE = 1000 # Size of the preloaded record batch in `Dataset.__iter__` ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10 # Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare) MAX_SHARD_SIZE = "500MB" # Parquet configuration PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 # Offline mode HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES # Here, `True` will disable progress bars globally without possibility of enabling it # programmatically. `False` will enable them without possibility of disabling them. # If environment variable is not set (None), then the user is free to enable/disable # them programmatically. # TL;DR: env variable has priority over code __HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS") HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = ( __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None else None ) # In-memory DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE)) # File names DATASET_ARROW_FILENAME = "dataset.arrow" DATASET_INDICES_FILENAME = "indices.arrow" DATASET_STATE_JSON_FILENAME = "state.json" DATASET_INFO_FILENAME = "dataset_info.json" DATASETDICT_INFOS_FILENAME = "dataset_infos.json" LICENSE_FILENAME = "LICENSE" METRIC_INFO_FILENAME = "metric_info.json" DATASETDICT_JSON_FILENAME = "dataset_dict.json" METADATA_CONFIGS_FIELD = "configs" REPOCARD_FILENAME = "README.md" REPOYAML_FILENAME = ".huggingface.yaml" MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules" MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255 # Temporary cache directory prefix TEMP_CACHE_DIR_PREFIX = "hf_datasets-" # Streaming STREAMING_READ_MAX_RETRIES = 20 STREAMING_READ_RETRY_INTERVAL = 5 # Datasets without script DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10 ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 # Progress bars PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec # Maximum number of uploaded files per commit UPLOADS_MAX_NUMBER_PER_COMMIT = 50 # Backward compatibiliy MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
datasets/src/datasets/config.py/0
{ "file_path": "datasets/src/datasets/config.py", "repo_id": "datasets", "token_count": 4439 }
73
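Since `datasets.config` resolves most of these values from environment variables at import time, a short sketch of inspecting them follows. The cache location `/tmp/hf_cache` is a hypothetical path, and the environment variables must be set before `datasets` is first imported for the overrides to take effect.

```python
# Minimal sketch: overriding and inspecting values resolved by datasets.config.
import os

os.environ["HF_DATASETS_OFFLINE"] = "1"            # one of the accepted "true" values
os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"  # hypothetical cache location

import datasets.config as cfg  # must happen after the env vars are set

print(cfg.HF_DATASETS_OFFLINE)  # True
print(cfg.HF_DATASETS_CACHE)    # PosixPath('/tmp/hf_cache')
print(cfg.TORCH_AVAILABLE, cfg.TF_AVAILABLE, cfg.JAX_AVAILABLE)
```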
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class BaseCompressedFileFileSystem(AbstractArchiveFileSystem): """Read contents of compressed file as a filesystem with one file inside.""" root_marker = "" protocol: str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) compression: str = None # compression type in fsspec. ex: "gzip" extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs ): """ The compressed file system can be instantiated from any compressed file. It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive. The single file inside the filesystem is named after the compresssed file, without the compression extension at the end of the filename. Args: fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()`` mode (:obj:``str``): Currently, only 'rb' accepted target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL. target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS. """ super().__init__(self, **kwargs) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode self.file = fsspec.open( fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed. }, **(target_options or {}), ) self.compressed_name = os.path.basename(self.file.path.split("::")[0]) self.uncompressed_name = ( self.compressed_name[: self.compressed_name.rindex(".")] if "." 
in self.compressed_name else self.compressed_name ) self.dir_cache = None @classmethod def _strip_protocol(cls, path): # compressed file paths are always relative to the archive root return super()._strip_protocol(path).lstrip("/") def _get_dirs(self): if self.dir_cache is None: f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name} self.dir_cache = {f["name"]: f} def cat(self, path: str): return self.file.open().read() def _open( self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ): path = self._strip_protocol(path) if mode != "rb": raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'") return self.file.open() class Bz2FileSystem(BaseCompressedFileFileSystem): """Read contents of BZ2 file as a filesystem with one file inside.""" protocol = "bz2" compression = "bz2" extension = ".bz2" class GzipFileSystem(BaseCompressedFileFileSystem): """Read contents of GZIP file as a filesystem with one file inside.""" protocol = "gzip" compression = "gzip" extension = ".gz" class Lz4FileSystem(BaseCompressedFileFileSystem): """Read contents of LZ4 file as a filesystem with one file inside.""" protocol = "lz4" compression = "lz4" extension = ".lz4" class XzFileSystem(BaseCompressedFileFileSystem): """Read contents of .xz (LZMA) file as a filesystem with one file inside.""" protocol = "xz" compression = "xz" extension = ".xz" class ZstdFileSystem(BaseCompressedFileFileSystem): """ Read contents of zstd file as a filesystem with one file inside. Note that reading in binary mode with fsspec isn't supported yet: https://github.com/indygreg/python-zstandard/issues/136 """ protocol = "zstd" compression = "zstd" extension = ".zst" def __init__( self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs, ): super().__init__( fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 _enter = self.file.__enter__ class WrappedFile: def __init__(self, file_): self._file = file_ def __enter__(self): self._file.__enter__() return self def __exit__(self, *args, **kwargs): self._file.__exit__(*args, **kwargs) def __iter__(self): return iter(self._file) def __next__(self): return next(self._file) def __getattr__(self, attr): return getattr(self._file, attr) def fixed_enter(*args, **kwargs): return WrappedFile(_enter(*args, **kwargs)) self.file.__enter__ = fixed_enter
datasets/src/datasets/filesystems/compression.py/0
{ "file_path": "datasets/src/datasets/filesystems/compression.py", "repo_id": "datasets", "token_count": 2608 }
74
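These filesystem classes are meant to be used through fsspec URL chaining, as the class docstring's `gzip://file.txt::http://foo.bar/file.txt.gz` example suggests. The sketch below assumes that importing `datasets` registers them with fsspec (the registration itself lives outside this module), and `/tmp/file.txt.gz` is a placeholder for any local gzip file.

```python
# Sketch of reading a gzip archive as a one-file filesystem via fsspec chaining.
import datasets  # noqa: F401  # assumed to register the compression filesystems with fsspec
import fsspec

# The inner file is named after the compressed file without its extension ("file.txt").
with fsspec.open("gzip://file.txt::file:///tmp/file.txt.gz", "rt") as f:
    print(f.read())
```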
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class JsonDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) self.field = field path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Json( cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class JsonDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, **to_json_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.encoding = "utf-8" self.storage_options = storage_options or {} self.to_json_kwargs = to_json_kwargs def write(self) -> int: _ = self.to_json_kwargs.pop("path_or_buf", None) orient = self.to_json_kwargs.pop("orient", "records") lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False) if "index" not in self.to_json_kwargs and orient in ["split", "table"]: self.to_json_kwargs["index"] = False # Determine the default compression value based on self.path_or_buf type default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None compression = self.to_json_kwargs.pop("compression", default_compression) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f"`datasets` currently does not support {compression} compression") if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with fsspec.open( self.path_or_buf, "wb", compression=compression, **(self.storage_options or {}) ) as buffer: written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) else: if compression: raise NotImplementedError( f"The compression parameter is not supported when writing to a buffer, but compression={compression}" " was passed. Please provide a local path instead." 
) written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) return written def _batch_json(self, args): offset, orient, lines, to_json_kwargs = args batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) if not json_str.endswith("\n"): json_str += "\n" return json_str.encode(self.encoding) def _write( self, file_obj: BinaryIO, orient, lines, **to_json_kwargs, ) -> int: """Writes the pyarrow table as JSON lines to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating json from Arrow format", ): json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) written += file_obj.write(json_str) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for json_str in hf_tqdm( pool.imap( self._batch_json, [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating json from Arrow format", ): written += file_obj.write(json_str) return written
datasets/src/datasets/io/json.py/0
{ "file_path": "datasets/src/datasets/io/json.py", "repo_id": "datasets", "token_count": 3086 }
75
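`JsonDatasetReader` and `JsonDatasetWriter` back the public `load_dataset("json", ...)` and `Dataset.to_json` entry points; a small round-trip sketch follows. The path `/tmp/demo.jsonl` is a placeholder.

```python
# Sketch of the public entry points wrapping JsonDatasetReader / JsonDatasetWriter.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})

# JsonDatasetWriter: one JSON object per line by default (orient="records", lines=True)
ds.to_json("/tmp/demo.jsonl")

# JsonDatasetReader, reached through the packaged "json" builder
reloaded = load_dataset("json", data_files="/tmp/demo.jsonl", split="train")
print(reloaded[0])  # {'text': 'hello', 'label': 0}
```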
import glob import os import shutil import time import warnings from pathlib import Path from typing import List, Optional, Tuple, Union import pyarrow as pa import datasets import datasets.config import datasets.data_files from datasets.naming import filenames_for_dataset_split logger = datasets.utils.logging.get_logger(__name__) def _get_modification_time(cached_directory_path): return (Path(cached_directory_path)).stat().st_mtime def _find_hash_in_cache( dataset_name: str, config_name: Optional[str], cache_dir: Optional[str] ) -> Tuple[str, str, str]: cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE)) cached_datasets_directory_path_root = os.path.join(cache_dir, dataset_name.replace("/", "___")) cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, config_name or "*", "*", "*") ) if os.path.isdir(cached_directory_path) ] if not cached_directory_paths: if config_name is not None: cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, "*", "*", "*") ) if os.path.isdir(cached_directory_path) ] available_configs = sorted( {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths} ) raise ValueError( f"Couldn't find cache for {dataset_name}" + (f" for config '{config_name}'" if config_name else "") + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "") ) # get most recent cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1]) version, hash = cached_directory_path.parts[-2:] other_configs = [ Path(cached_directory_path).parts[-3] for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash)) if os.path.isdir(cached_directory_path) ] if not config_name and len(other_configs) > 1: raise ValueError( f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}" f"\nPlease specify which configuration to reload from the cache, e.g." f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')" ) config_name = cached_directory_path.parts[-3] warning_msg = ( f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} " f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})." 
) logger.warning(warning_msg) return config_name, version, hash class Cache(datasets.ArrowBasedBuilder): def __init__( self, cache_dir: Optional[str] = None, dataset_name: Optional[str] = None, config_name: Optional[str] = None, version: Optional[str] = "0.0.0", hash: Optional[str] = None, base_path: Optional[str] = None, info: Optional[datasets.DatasetInfo] = None, features: Optional[datasets.Features] = None, token: Optional[Union[bool, str]] = None, use_auth_token="deprecated", repo_id: Optional[str] = None, data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None, data_dir: Optional[str] = None, storage_options: Optional[dict] = None, writer_batch_size: Optional[int] = None, name="deprecated", **config_kwargs, ): if use_auth_token != "deprecated": warnings.warn( "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'token={use_auth_token}' instead.", FutureWarning, ) token = use_auth_token if name != "deprecated": warnings.warn( "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.", category=FutureWarning, ) config_name = name if repo_id is None and dataset_name is None: raise ValueError("repo_id or dataset_name is required for the Cache dataset builder") if data_files is not None: config_kwargs["data_files"] = data_files if data_dir is not None: config_kwargs["data_dir"] = data_dir if hash == "auto" and version == "auto": # First we try to find a folder that takes the config_kwargs into account # e.g. with "default-data_dir=data%2Ffortran" as config_id config_id = self.BUILDER_CONFIG_CLASS(config_name or "default").create_config_id( config_kwargs=config_kwargs, custom_features=features ) config_name, version, hash = _find_hash_in_cache( dataset_name=repo_id or dataset_name, config_name=config_id, cache_dir=cache_dir, ) elif hash == "auto" or version == "auto": raise NotImplementedError("Pass both hash='auto' and version='auto' instead") super().__init__( cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, version=version, hash=hash, base_path=base_path, info=info, token=token, repo_id=repo_id, storage_options=storage_options, writer_batch_size=writer_batch_size, ) def _info(self) -> datasets.DatasetInfo: return datasets.DatasetInfo() def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs): if not os.path.exists(self.cache_dir): raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}") if output_dir is not None and output_dir != self.cache_dir: shutil.copytree(self.cache_dir, output_dir) def _split_generators(self, dl_manager): # used to stream from cache if isinstance(self.info.splits, datasets.SplitDict): split_infos: List[datasets.SplitInfo] = list(self.info.splits.values()) else: raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}") return [ datasets.SplitGenerator( name=split_info.name, gen_kwargs={ "files": filenames_for_dataset_split( self.cache_dir, dataset_name=self.dataset_name, split=split_info.name, filetype_suffix="arrow", shard_lengths=split_info.shard_lengths, ) }, ) for split_info in split_infos ] def _generate_tables(self, files): # used to stream from cache for file_idx, file in enumerate(files): with open(file, "rb") as f: try: for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print 
the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", pa_table except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
datasets/src/datasets/packaged_modules/cache/cache.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/cache/cache.py", "repo_id": "datasets", "token_count": 3777 }
76
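The `Cache` builder is an internal fallback that reloads an already-prepared dataset from the local cache (for example when working offline). The direct instantiation below is illustration only, not the usual public API, and assumes a dataset named "squad" has previously been prepared in the default cache directory.

```python
# Rough sketch, assuming "squad" is already present in the local datasets cache.
from datasets.packaged_modules.cache.cache import Cache

builder = Cache(
    dataset_name="squad",  # placeholder: any dataset already in the cache
    version="auto",
    hash="auto",           # with both set to "auto", config/version/hash are resolved from the cache dir
)
builder.download_and_prepare()          # no-op apart from validating the cache directory
ds = builder.as_dataset(split="train")  # streams back the cached Arrow shards
print(ds)
```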
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int logger = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class SparkConfig(datasets.BuilderConfig): """BuilderConfig for Spark.""" features: Optional[datasets.Features] = None def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]): df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}") for partition_id in new_partition_order[1:]: partition_df = df.select("*").where(f"part_id = {partition_id}") df_combined = df_combined.union(partition_df) return df_combined def _generate_iterable_examples( df: "pyspark.sql.DataFrame", partition_order: List[int], ): import pyspark def generate_fn(): df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id")) partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order) row_id = 0 # pipeline next partition in parallel to hide latency rows = partition_df.toLocalIterator(prefetchPartitions=True) curr_partition = -1 for row in rows: row_as_dict = row.asDict() part_id = row_as_dict["part_id"] row_as_dict.pop("part_id") if curr_partition != part_id: curr_partition = part_id row_id = 0 yield f"{part_id}_{row_id}", row_as_dict row_id += 1 return generate_fn class SparkExamplesIterable(_BaseExamplesIterable): def __init__( self, df: "pyspark.sql.DataFrame", partition_order=None, ): self.df = df self.partition_order = partition_order or range(self.df.rdd.getNumPartitions()) self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order) def __iter__(self): yield from self.generate_examples_fn() def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable": partition_order = list(range(self.df.rdd.getNumPartitions())) generator.shuffle(partition_order) return SparkExamplesIterable(self.df, partition_order=partition_order) def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable": partition_order = self.split_shard_indices_by_worker(worker_id, num_workers) return SparkExamplesIterable(self.df, partition_order=partition_order) @property def n_shards(self) -> int: return len(self.partition_order) class Spark(datasets.DatasetBuilder): BUILDER_CONFIG_CLASS = SparkConfig def __init__( self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ): import pyspark self._spark = pyspark.sql.SparkSession.builder.getOrCreate() self.df = df self._working_dir = working_dir super().__init__( cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, ) def _validate_cache_dir(self): # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling # error due to pickling the SparkContext. cache_dir = self._cache_dir # Returns the path of the created file. def create_cache_and_write_probe(context): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(cache_dir, exist_ok=True) probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(probe_file, "a") return [probe_file] if self._spark.conf.get("spark.master", "").startswith("local"): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: probe = ( self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() ) if os.path.isfile(probe[0]): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] def _repartition_df_if_needed(self, max_shard_size): import pyspark def get_arrow_batch_size(it): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) df_num_rows = self.df.count() sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. approx_bytes_per_row = ( self.df.limit(sample_num_rows) .repartition(1) .mapInArrow(get_arrow_batch_size, "batch_bytes: long") .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) .collect()[0] .sample_bytes / sample_num_rows ) approx_total_size = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) self.df = self.df.repartition(new_num_partitions) def _prepare_split_single( self, fpath: str, file_format: str, max_shard_size: int, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath embed_local_files = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. features = self.config.features writer_batch_size = self._writer_batch_size storage_options = self._fs.storage_options def write_arrow(it): # Within the same SparkContext, no two task attempts will share the same attempt ID. task_id = pyspark.TaskContext().taskAttemptId() first_batch = next(it, None) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], ) shard_id = 0 writer = writer_class( features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([first_batch]) writer.write_table(table) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) shard_id += 1 writer = writer_class( features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([batch]) writer.write_table(table) if writer._num_bytes > 0: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(working_fpath)): dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) shutil.move(file, dest) stats = ( self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long") .groupBy("task_id") .agg( pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _prepare_split( self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs, ): self._validate_cache_dir() max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) self._repartition_df_if_needed(max_shard_size) is_local = not is_remote_filesystem(self._fs) path_join = os.path.join if is_local else posixpath.join SUFFIX = "-TTTTT-SSSSS-of-NNNNN" fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" fpath = path_join(self._output_dir, fname) total_num_examples = 0 total_num_bytes = 0 total_shards = 0 task_id_and_num_shards = [] all_shard_lengths = [] for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size): ( num_examples, num_bytes, num_shards, shard_lengths, ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards)) all_shard_lengths.extend(shard_lengths) split_generator.split_info.num_examples = total_num_examples split_generator.split_info.num_bytes = total_num_bytes # should rename everything at the end logger.debug(f"Renaming {total_shards} shards.") if total_shards > 1: split_generator.split_info.shard_lengths = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
fs = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( task_id: int, shard_id: int, global_shard_id: int, ): rename( fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) args = [] global_shard_id = 0 for i in range(len(task_id_and_num_shards)): task_id, num_shards = task_id_and_num_shards[i] for shard_id in range(num_shards): args.append([task_id, shard_id, global_shard_id]) global_shard_id += 1 self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() else: # don't use any pattern shard_id = 0 task_id = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), ) def _get_examples_iterable_for_split( self, split_generator: "datasets.SplitGenerator", ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df)
datasets/src/datasets/packaged_modules/spark/spark.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/spark/spark.py", "repo_id": "datasets", "token_count": 6664 }
77
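The Spark builder above powers the user-facing `Dataset.from_spark` helper; a toy sketch follows. It assumes `pyspark` is installed and a `datasets` release recent enough to expose `Dataset.from_spark`, and the DataFrame contents are placeholders.

```python
# Sketch of the entry point that wraps the Spark builder.
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label long")

# Materializes the DataFrame into cached Arrow shards via the Spark builder
ds = Dataset.from_spark(df)
print(ds.column_names)  # ['text', 'label']
```

On a multi-node cluster, `_validate_cache_dir` above requires a `cache_dir` on shared storage reachable by both the driver and the workers.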
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
datasets/src/datasets/tasks/automatic_speech_recognition.py/0
{ "file_path": "datasets/src/datasets/tasks/automatic_speech_recognition.py", "repo_id": "datasets", "token_count": 459 }
78
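A short sketch of applying this task template follows; it only uses the classes shown above, with a hand-built `Features` object standing in for a real dataset's schema.

```python
# Sketch: aligning the ASR task template with a dataset's features.
from datasets import Audio, Features, Value
from datasets.tasks import AutomaticSpeechRecognition

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
task = AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")

# align_with_features copies the dataset's Audio feature (including its sampling rate)
# into the template's input schema
task = task.align_with_features(features)
print(task.column_mapping)         # {'audio': 'audio', 'transcription': 'transcription'}
print(task.input_schema["audio"])  # the Audio feature taken from `features`
```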
import bz2 import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from ._filelock import FileLock from .logging import get_logger logger = get_logger(__name__) class ExtractManager: def __init__(self, cache_dir: Optional[str] = None): self.extract_dir = ( os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH ) self.extractor = Extractor def _get_output_path(self, path: str) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" abs_path = os.path.abspath(path) return os.path.join(self.extract_dir, hash_url_to_filename(abs_path)) def _do_extract(self, output_path: str, force_extract: bool) -> bool: return force_extract or ( not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path)) ) def extract(self, input_path: str, force_extract: bool = False) -> str: extractor_format = self.extractor.infer_extractor_format(input_path) if not extractor_format: return input_path output_path = self._get_output_path(input_path) if self._do_extract(output_path, force_extract): self.extractor.extract(input_path, output_path, extractor_format) return output_path class BaseExtractor(ABC): @classmethod @abstractmethod def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: ... @staticmethod @abstractmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: ... class MagicNumberBaseExtractor(BaseExtractor, ABC): magic_numbers: List[bytes] = [] @staticmethod def read_magic_number(path: Union[Path, str], magic_number_length: int): with open(path, "rb") as f: return f.read(magic_number_length) @classmethod def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool: if not magic_number: magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers) try: magic_number = cls.read_magic_number(path, magic_number_length) except OSError: return False return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers) class TarExtractor(BaseExtractor): @classmethod def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: return tarfile.is_tarfile(path) @staticmethod def safemembers(members, output_path): """ Fix for CVE-2007-4559 Desc: Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot) sequence in filenames in a TAR archive, a related issue to CVE-2001-1267. 
See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559 From: https://stackoverflow.com/a/10077309 """ def resolved(path: str) -> str: return os.path.realpath(os.path.abspath(path)) def badpath(path: str, base: str) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(base, path)).startswith(base) def badlink(info, base: str) -> bool: # Links are interpreted relative to the directory containing the link tip = resolved(os.path.join(base, os.path.dirname(info.name))) return badpath(info.linkname, base=tip) base = resolved(output_path) for finfo in members: if badpath(finfo.name, base): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)") elif finfo.issym() and badlink(finfo, base): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}") elif finfo.islnk() and badlink(finfo, base): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}") else: yield finfo @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: os.makedirs(output_path, exist_ok=True) tar_file = tarfile.open(input_path) tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path)) tar_file.close() class GzipExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\x1f\x8b"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: with gzip.open(input_path, "rb") as gzip_file: with open(output_path, "wb") as extracted_file: shutil.copyfileobj(gzip_file, extracted_file) class ZipExtractor(MagicNumberBaseExtractor): magic_numbers = [ b"PK\x03\x04", b"PK\x05\x06", # empty archive b"PK\x07\x08", # spanned archive ] @classmethod def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool: if super().is_extractable(path, magic_number=magic_number): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(path, "rb") as fp: endrec = _EndRecData(fp) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: data = fp.read(sizeCentralDir) # CD is where we expect it to be if len(data) == sizeCentralDir: centdir = struct.unpack(structCentralDir, data) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: os.makedirs(output_path, exist_ok=True) with zipfile.ZipFile(input_path, "r") as zip_file: zip_file.extractall(output_path) zip_file.close() class XzExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: with lzma.open(input_path) as compressed_file: with open(output_path, "wb") as extracted_file: shutil.copyfileobj(compressed_file, extracted_file) class RarExtractor(MagicNumberBaseExtractor): magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile") import rarfile os.makedirs(output_path, exist_ok=True) rf = rarfile.RarFile(input_path) rf.extractall(output_path) rf.close() class ZstdExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\x28\xb5\x2f\xfd"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard") import zstandard as zstd dctx = zstd.ZstdDecompressor() with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh: dctx.copy_stream(ifh, ofh) class Bzip2Extractor(MagicNumberBaseExtractor): magic_numbers = [b"\x42\x5a\x68"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: with bz2.open(input_path, "rb") as compressed_file: with open(output_path, "wb") as extracted_file: shutil.copyfileobj(compressed_file, extracted_file) class SevenZipExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr") import py7zr os.makedirs(output_path, exist_ok=True) with py7zr.SevenZipFile(input_path, "r") as archive: archive.extractall(output_path) class Lz4Extractor(MagicNumberBaseExtractor): magic_numbers = [b"\x04\x22\x4d\x18"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip install lz4") import lz4.frame with lz4.frame.open(input_path, "rb") as compressed_file: with 
open(output_path, "wb") as extracted_file: shutil.copyfileobj(compressed_file, extracted_file) class Extractor: # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) extractors: Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": Bzip2Extractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": Lz4Extractor, # <Added version="2.4.0"/> } @classmethod def _get_magic_number_max_length(cls): return max( len(extractor_magic_number) for extractor in cls.extractors.values() if issubclass(extractor, MagicNumberBaseExtractor) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _read_magic_number(path: Union[Path, str], magic_number_length: int): try: return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length) except OSError: return b"" @classmethod def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead.", category=FutureWarning, ) extractor_format = cls.infer_extractor_format(path) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def infer_extractor_format(cls, path: Union[Path, str]) -> str: # <Added version="2.4.0"/> magic_number_max_length = cls._get_magic_number_max_length() magic_number = cls._read_magic_number(path, magic_number_max_length) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(path, magic_number=magic_number): return extractor_format @classmethod def extract( cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, # <Added version="2.4.0"/> extractor: Optional[BaseExtractor] = "deprecated", ) -> None: os.makedirs(os.path.dirname(output_path), exist_ok=True) # Prevent parallel extractions lock_path = str(Path(output_path).with_suffix(".lock")) with FileLock(lock_path): shutil.rmtree(output_path, ignore_errors=True) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(extractor_format, str): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead.", category=FutureWarning, ) extractor = extractor if extractor != "deprecated" else extractor_format else: extractor = cls.extractors[extractor_format] return extractor.extract(input_path, output_path) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0.", category=FutureWarning, ) for extractor in cls.extractors.values(): if extractor.is_extractable(input_path): return extractor.extract(input_path, output_path)
datasets/src/datasets/utils/extract.py/0
{ "file_path": "datasets/src/datasets/utils/extract.py", "repo_id": "datasets", "token_count": 6394 }
79
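As a quick illustration of how the Extractor class above is meant to be used, here is a minimal sketch; the archive path and output directory are placeholders chosen for the example, not part of the module.

# Hypothetical usage of the Extractor API defined above; "data.zip" and "extracted/"
# are placeholder paths for this sketch.
from datasets.utils.extract import Extractor

archive_path = "data.zip"
output_dir = "extracted/"

# Reads the file's magic number and maps it to one of the registered formats
# ("tar", "gzip", "zip", "xz", "rar", "zstd", "bz2", "7z", "lz4"), or returns None.
extractor_format = Extractor.infer_extractor_format(archive_path)

if extractor_format is not None:
    # Dispatches to the matching extractor; the output directory is (re)created
    # under a file lock to prevent parallel extractions of the same archive.
    Extractor.extract(archive_path, output_dir, extractor_format=extractor_format)
else:
    print(f"{archive_path} is not a recognized archive or compressed file")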
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    # until we decide how to define sharding without ambiguity for users
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several lists of data sources with different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """
    Get the range of shard indices per job.
    If num_shards < max_num_jobs, then num_shards jobs are given a range of one shard each.
    The order of the shard indices is preserved: e.g. the first shards are given to the first jobs.
    Moreover all the jobs are given approximately the same number of shards.

    Example:

    ```python
    >>> _distribute_shards(2, max_num_jobs=4)
    [range(0, 1), range(1, 2)]
    >>> _distribute_shards(10, max_num_jobs=3)
    [range(0, 4), range(4, 7), range(7, 10)]
    ```
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs"""
    # We must shuffle all the lists, and lists of the same size must have the same shuffling.
    # This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)} indices_per_size = {} for size in list_sizes: indices_per_size[size] = list(range(size)) rng.shuffle(indices_per_size[size]) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes shuffled_kwargs = dict(gen_kwargs) for key, value in shuffled_kwargs.items(): if isinstance(value, list): shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]] return shuffled_kwargs
datasets/src/datasets/utils/sharding.py/0
{ "file_path": "datasets/src/datasets/utils/sharding.py", "repo_id": "datasets", "token_count": 1742 }
80
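To make the behaviour of the sharding helpers above concrete, here is a small worked example; the gen_kwargs dictionary (a list of file names plus a scalar option) is invented for the purpose of this sketch.

# Made-up gen_kwargs for this sketch: one list of data sources plus a scalar option.
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs

gen_kwargs = {"files": ["f0.txt", "f1.txt", "f2.txt", "f3.txt", "f4.txt"], "encoding": "utf-8"}

# The single list defines the number of shards.
assert _number_of_shards_in_gen_kwargs(gen_kwargs) == 5

# 5 shards over 2 jobs: the first job gets 3 shards, the second gets 2.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]

# Only list values are sliced per job; scalar values are copied to every job.
job_kwargs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
assert job_kwargs[0] == {"files": ["f0.txt", "f1.txt", "f2.txt"], "encoding": "utf-8"}
assert job_kwargs[1] == {"files": ["f3.txt", "f4.txt"], "encoding": "utf-8"}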
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
        "num_proc",
    ],
    defaults=[None, None, None, False, False, False, False, False, None],
)


def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
datasets/tests/commands/test_test.py/0
{ "file_path": "datasets/tests/commands/test_test.py", "repo_id": "datasets", "token_count": 1511 }
81
import contextlib import csv import json import os import sqlite3 import tarfile import textwrap import zipfile import pandas as pd import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config # dataset + arrow_file @pytest.fixture(scope="session") def dataset(): n = 10 features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])), "answers": datasets.Sequence( { "text": datasets.Value("string"), "answer_start": datasets.Value("int32"), } ), "id": datasets.Value("int64"), } ) dataset = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(n)), }, features=features, ) return dataset @pytest.fixture(scope="session") def arrow_file(tmp_path_factory, dataset): filename = str(tmp_path_factory.mktemp("data") / "file.arrow") dataset.map(cache_file_name=filename) return filename # FILE_CONTENT + files FILE_CONTENT = """\ Text data. Second line of data.""" @pytest.fixture(scope="session") def text_file(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.txt" data = FILE_CONTENT with open(filename, "w") as f: f.write(data) return filename @pytest.fixture(scope="session") def bz2_file(tmp_path_factory): import bz2 path = tmp_path_factory.mktemp("data") / "file.txt.bz2" data = bytes(FILE_CONTENT, "utf-8") with bz2.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def gz_file(tmp_path_factory): import gzip path = str(tmp_path_factory.mktemp("data") / "file.txt.gz") data = bytes(FILE_CONTENT, "utf-8") with gzip.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def lz4_file(tmp_path_factory): if datasets.config.LZ4_AVAILABLE: import lz4.frame path = tmp_path_factory.mktemp("data") / "file.txt.lz4" data = bytes(FILE_CONTENT, "utf-8") with lz4.frame.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def seven_zip_file(tmp_path_factory, text_file): if datasets.config.PY7ZR_AVAILABLE: import py7zr path = tmp_path_factory.mktemp("data") / "file.txt.7z" with py7zr.SevenZipFile(path, "w") as archive: archive.write(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def tar_file(tmp_path_factory, text_file): import tarfile path = tmp_path_factory.mktemp("data") / "file.txt.tar" with tarfile.TarFile(path, "w") as f: f.add(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def xz_file(tmp_path_factory): import lzma path = tmp_path_factory.mktemp("data") / "file.txt.xz" data = bytes(FILE_CONTENT, "utf-8") with lzma.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def zip_file(tmp_path_factory, text_file): import zipfile path = tmp_path_factory.mktemp("data") / "file.txt.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def zstd_file(tmp_path_factory): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd path = tmp_path_factory.mktemp("data") / "file.txt.zst" data = bytes(FILE_CONTENT, "utf-8") with zstd.open(path, "wb") as f: f.write(data) return path # xml_file @pytest.fixture(scope="session") def xml_file(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.xml" data = textwrap.dedent( """\ <?xml version="1.0" 
encoding="UTF-8" ?> <tmx version="1.4"> <header segtype="sentence" srclang="ca" /> <body> <tu> <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv> <tuv xml:lang="en"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv> <tuv xml:lang="en"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv> <tuv xml:lang="en"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv> <tuv xml:lang="en"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv> <tuv xml:lang="en"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(filename, "w") as f: f.write(data) return filename DATA = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] DATA2 = [ {"col_1": "4", "col_2": 4, "col_3": 4.0}, {"col_1": "5", "col_2": 5, "col_3": 5.0}, ] DATA_DICT_OF_LISTS = { "col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0], } DATA_312 = [ {"col_3": 0.0, "col_1": "0", "col_2": 0}, {"col_3": 1.0, "col_1": "1", "col_2": 1}, ] DATA_STR = [ {"col_1": "s0", "col_2": 0, "col_3": 0.0}, {"col_1": "s1", "col_2": 1, "col_3": 1.0}, {"col_1": "s2", "col_2": 2, "col_3": 2.0}, {"col_1": "s3", "col_2": 3, "col_3": 3.0}, ] @pytest.fixture(scope="session") def dataset_dict(): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session") def arrow_path(tmp_path_factory): dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS) path = str(tmp_path_factory.mktemp("data") / "dataset.arrow") dataset.map(cache_file_name=path) return path @pytest.fixture(scope="session") def sqlite_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite") with contextlib.closing(sqlite3.connect(path)) as con: cur = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)") for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values())) con.commit() return path @pytest.fixture(scope="session") def csv_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.csv") with open(path, "w", newline="") as f: writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) writer.writeheader() for item in DATA: writer.writerow(item) return path @pytest.fixture(scope="session") def csv2_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset2.csv") with open(path, "w", newline="") as f: writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) writer.writeheader() for item in DATA: writer.writerow(item) return path @pytest.fixture(scope="session") def bz2_csv_path(csv_path, tmp_path_factory): import bz2 path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2" with open(csv_path, "rb") as f: data = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bz2.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def zip_csv_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("zip_csv_path") / "csv-dataset.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.basename(csv_path)) f.write(csv2_path, arcname=os.path.basename(csv2_path)) return path @pytest.fixture(scope="session") def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.csv.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, 
arcname=os.path.basename(csv_path.replace(".csv", ".CSV"))) f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV"))) return path @pytest.fixture(scope="session") def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path))) f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path))) return path @pytest.fixture(scope="session") def parquet_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.parquet") schema = pa.schema( { "col_1": pa.string(), "col_2": pa.int64(), "col_3": pa.float64(), } ) with open(path, "wb") as f: writer = pq.ParquetWriter(f, schema=schema) pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema) writer.write_table(pa_table) writer.close() return path @pytest.fixture(scope="session") def geoparquet_path(tmp_path_factory): df = pd.read_parquet(path="https://github.com/opengeospatial/geoparquet/raw/v1.0.0/examples/example.parquet") path = str(tmp_path_factory.mktemp("data") / "dataset.geoparquet") df.to_parquet(path=path) return path @pytest.fixture(scope="session") def json_list_of_dicts_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.json") data = {"data": DATA} with open(path, "w") as f: json.dump(data, f) return path @pytest.fixture(scope="session") def json_dict_of_lists_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.json") data = {"data": DATA_DICT_OF_LISTS} with open(path, "w") as f: json.dump(data, f) return path @pytest.fixture(scope="session") def jsonl_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl") with open(path, "w") as f: for item in DATA: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl2_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl") with open(path, "w") as f: for item in DATA: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl_312_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl") with open(path, "w") as f: for item in DATA_312: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl_str_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl") with open(path, "w") as f: for item in DATA_STR: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def text_gz_path(tmp_path_factory, text_path): import gzip path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz") with open(text_path, "rb") as orig_file: with gzip.open(path, "wb") as zipped_file: zipped_file.writelines(orig_file) return path @pytest.fixture(scope="session") def jsonl_gz_path(tmp_path_factory, jsonl_path): import gzip path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz") with open(jsonl_path, "rb") as orig_file: with gzip.open(path, "wb") as zipped_file: zipped_file.writelines(orig_file) return path @pytest.fixture(scope="session") def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(jsonl_path, arcname=os.path.basename(jsonl_path)) f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path)) return path 
@pytest.fixture(scope="session") def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path))) return path @pytest.fixture(scope="session") def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path))) f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path))) return path @pytest.fixture(scope="session") def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar" with tarfile.TarFile(path, "w") as f: f.add(jsonl_path, arcname=os.path.basename(jsonl_path)) f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path)) return path @pytest.fixture(scope="session") def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar" with tarfile.TarFile(path, "w") as f: f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path))) return path @pytest.fixture(scope="session") def text_path(tmp_path_factory): data = ["0", "1", "2", "3"] path = str(tmp_path_factory.mktemp("data") / "dataset.txt") with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def text2_path(tmp_path_factory): data = ["0", "1", "2", "3"] path = str(tmp_path_factory.mktemp("data") / "dataset2.txt") with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def text_dir(tmp_path_factory): data = ["0", "1", "2", "3"] path = tmp_path_factory.mktemp("data_text_dir") / "dataset.txt" with open(path, "w") as f: for item in data: f.write(item + "\n") return path.parent @pytest.fixture(scope="session") def text_dir_with_unsupported_extension(tmp_path_factory): data = ["0", "1", "2", "3"] path = tmp_path_factory.mktemp("data") / "dataset.abc" with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def zip_text_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.text.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.basename(text_path)) f.write(text2_path, arcname=os.path.basename(text2_path)) return path @pytest.fixture(scope="session") def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path))) f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path))) return path @pytest.fixture(scope="session") def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.ext.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.basename("unsupported.ext")) f.write(text2_path, arcname=os.path.basename("unsupported_2.ext")) return path @pytest.fixture(scope="session") def text_path_with_unicode_new_lines(tmp_path_factory): text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"]) path = 
str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt") with open(path, "w", encoding="utf-8") as f: f.write(text) return path @pytest.fixture(scope="session") def image_file(): return os.path.join("tests", "features", "data", "test_image_rgb.jpg") @pytest.fixture(scope="session") def audio_file(): return os.path.join("tests", "features", "data", "test_audio_44100.wav") @pytest.fixture(scope="session") def zip_image_path(image_file, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.img.zip" with zipfile.ZipFile(path, "w") as f: f.write(image_file, arcname=os.path.basename(image_file)) f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg")) return path @pytest.fixture(scope="session") def data_dir_with_hidden_files(tmp_path_factory): data_dir = tmp_path_factory.mktemp("data_dir") (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) # hidden file with open(data_dir / "subdir" / ".test.txt", "w") as f: f.write("bar\n" * 10) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / ".subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) return data_dir
datasets/tests/fixtures/files.py/0
{ "file_path": "datasets/tests/fixtures/files.py", "repo_id": "datasets", "token_count": 8208 }
82
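The fixtures above are meant to be injected into tests by pytest. The two test functions below are a hypothetical sketch of such a consumer (they assume the fixtures are visible to the test module, e.g. through a conftest.py); they are not part of the fixture module itself.

# Hypothetical consumer of the fixtures above; pytest injects them by parameter name.
import csv
import zipfile


def test_csv_fixture_has_expected_rows(csv_path):
    # csv_path points to a temporary dataset.csv with columns col_1, col_2, col_3.
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert len(rows) == 4
    assert rows[0]["col_1"] == "0"


def test_zip_csv_fixture_bundles_both_files(zip_csv_path):
    # zip_csv_path bundles dataset.csv and dataset2.csv into a single archive.
    with zipfile.ZipFile(zip_csv_path) as zf:
        assert sorted(zf.namelist()) == ["dataset.csv", "dataset2.csv"]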
import importlib import shutil import textwrap import pytest from datasets import ClassLabel, DownloadManager, Features, Value from datasets.data_files import DataFilesDict, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( FolderBasedBuilder, FolderBasedBuilderConfig, ) from datasets.tasks import TextClassification remote_files = [ "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hallo.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hello.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour2.txt", ] class DummyFolderBasedBuilder(FolderBasedBuilder): BASE_FEATURE = dict BASE_COLUMN_NAME = "base" BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig EXTENSIONS = [".txt"] CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label") @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "autofolder_cache_dir") @pytest.fixture def auto_text_file(text_file): return str(text_file) @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_different_levels" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "subdir" / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_different_levels = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_different_levels @pytest.fixture def data_files_with_one_label_no_metadata(tmp_path, auto_text_file): # only one label found = all files in a single dir/in a root dir data_dir = tmp_path / "data_files_with_one_label" data_dir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) return data_files_with_one_label @pytest.fixture def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" 
subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file_class0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file_class1.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"} {"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def file_with_metadata(tmp_path, text_file): filename = tmp_path / "file.txt" shutil.copyfile(text_file, filename) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(metadata_filename) @pytest.fixture() def files_with_metadata_that_misses_one_sample(tmp_path, auto_text_file): filename = tmp_path / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = tmp_path / "file2.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file2.txt" shutil.copyfile(auto_text_file, filename2) filename3 = subdir / "file3.txt" # in subdir shutil.copyfile(auto_text_file, filename3) metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "file2.txt", "additional_feature": "Second dummy file"} {"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) filename = train_dir / "file.txt" # train shutil.copyfile(auto_text_file, filename) filename2 = train_dir / "file2.txt" # train shutil.copyfile(auto_text_file, filename2) filename3 = test_dir / "file3.txt" # test shutil.copyfile(auto_text_file, filename3) train_metadata_filename = train_dir / "metadata.jsonl" train_metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Train dummy file"} 
{"file_name": "file2.txt", "additional_feature": "Second train dummy file"} """ ) with open(train_metadata_filename, "w", encoding="utf-8") as f: f.write(train_metadata) test_metadata_filename = test_dir / "metadata.jsonl" test_metadata = textwrap.dedent( """\ {"file_name": "file3.txt", "additional_feature": "Test dummy file"} """ ) with open(test_metadata_filename, "w", encoding="utf-8") as f: f.write(test_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = archive_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir / "file2.txt" # in subdir shutil.copyfile(auto_text_file, filename2) metadata_filename = archive_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) shutil.make_archive(archive_dir, "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives def test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert autofolder.info.features == Features({"base": {}, "label": ClassLabel(names=["class0", "class1"])}) generator = autofolder._generate_examples(**gen_kwargs) assert all(example["label"] in {"class0", "class1"} for _, example in generator) def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir): # builder would try to access non-existing attributes of a default `BuilderConfig` class # as a custom one is not provided with pytest.raises(AttributeError): _ = FolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, ) # test that AutoFolder is extended for streaming when it's child class is instantiated: # see line 115 in src/datasets/streaming.py def test_streaming_patched(): _ = DummyFolderBasedBuilder() module = importlib.import_module(FolderBasedBuilder.__module__) assert hasattr(module, "_patched_for_streaming") assert module._patched_for_streaming @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_duplicated_label_key( files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog ): class0_file, class1_file, metadata_file = files_with_labels_and_duplicated_label_key_in_metadata autofolder = 
DummyFolderBasedBuilder( data_files=[class0_file, class1_file, metadata_file], cache_dir=cache_dir, drop_metadata=drop_metadata, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is False: # infer labels from directories even if metadata files are found warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) assert warning_in_logs if drop_metadata is not True else not warning_in_logs assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"]) assert all(example["label"] in ["class0", "class1"] for _, example in generator) else: if drop_metadata is not True: # labels are from metadata assert autofolder.info.features["label"] == Value("string") assert all(example["label"] in ["CLASS_0", "CLASS_1"] for _, example in generator) else: # drop both labels and metadata assert autofolder.info.features == Features({"base": {}}) assert all(example.keys() == {"base"} for _, example in generator) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels( data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir ): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing labels explicitly requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False generator = autofolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"base", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir): file, metadata_file = file_with_metadata autofolder = DummyFolderBasedBuilder( data_files=[file, metadata_file], drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is (drop_labels is False) generator = autofolder._generate_examples(**gen_kwargs) expected_columns = {"base"} if gen_kwargs["add_metadata"]: expected_columns.add("additional_feature") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @pytest.mark.parametrize("remote", [True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_different_levels_no_metadata( data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir 
): data_files = remote_files if remote else data_files_with_different_levels_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if files are on different levels in dir structure assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("remote", [False, True]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir): data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if only one label is found (=if there is a single dir) assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_data_files_with_metadata_that_misses_one_sample( files_with_metadata_that_misses_one_sample, drop_metadata, cache_dir ): file, file2, metadata_file = files_with_metadata_that_misses_one_sample if not drop_metadata: features = Features({"base": None, "additional_feature": Value("string")}) else: features = Features({"base": None}) autofolder = DummyFolderBasedBuilder( data_files=[file, file2, metadata_file], drop_metadata=drop_metadata, features=features, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): list(generator) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("n_splits", [1, 2]) def test_data_files_with_metadata_and_splits( streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata ): data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, ) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files.items(), generated_splits): assert split == generated_split.name expected_num_of_examples = len(files) - 1 generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == 
expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits): assert split == generated_split.name num_of_archives = len(files) expected_num_of_examples = 2 * num_of_archives generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "bad_metadata.jsonl" # bad file metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) assert all("additional_feature" not in example for _, example in generator) def test_data_files_with_wrong_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"bad_file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: _ = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert "`file_name` must be present" in str(exc_info.value)
datasets/tests/packaged_modules/test_folder_based_builder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_folder_based_builder.py", "repo_id": "datasets", "token_count": 8915 }
83
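For orientation, here is a rough sketch of how the pieces exercised by these tests fit together outside of pytest; the directory and cache paths are placeholders, and DummyFolderBasedBuilder refers to the helper class defined at the top of the test file above.

# Hypothetical driver mirroring what the tests above do: point a folder-based builder
# at a directory that contains data files plus a metadata.jsonl, then iterate examples.
from datasets.data_files import DataFilesDict, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager

data_dir = "path/to/data_dir"  # placeholder: contains file.txt and metadata.jsonl
data_files = DataFilesDict.from_patterns(get_data_patterns(data_dir), data_dir)

builder = DummyFolderBasedBuilder(data_files=data_files, cache_dir="path/to/cache")
gen_kwargs = builder._split_generators(StreamingDownloadManager())[0].gen_kwargs

for key, example in builder._generate_examples(**gen_kwargs):
    # With a metadata.jsonl present, each example carries the metadata columns,
    # e.g. {"base": ..., "additional_feature": "Dummy file"}.
    print(key, sorted(example))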
import unittest import warnings from datasets.utils import experimental @experimental def dummy_function(): return "success" class TestExperimentalFlag(unittest.TestCase): def test_experimental_warning(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") self.assertEqual(dummy_function(), "success") self.assertEqual(len(w), 1)
datasets/tests/test_experimental.py/0
{ "file_path": "datasets/tests/test_experimental.py", "repo_id": "datasets", "token_count": 152 }
84
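The test above only asserts that calling a decorated function emits exactly one warning. As background, here is one possible shape of such a decorator; this is an illustrative reimplementation, not the actual code of datasets.utils.experimental.

# Illustrative sketch of an "experimental" decorator: it wraps the callable and emits
# a warning on every call. The real datasets.utils.experimental may differ in details.
import functools
import warnings


def experimental(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(
            f"{fn.__name__} is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return wrapper


@experimental
def dummy_function():
    return "success"  # emits one warning per call, as the unit test above expects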
# Copyright 2020 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration pytestmark = pytest.mark.integration REQUIRE_FAIRSEQ = {"comet"} _has_fairseq = importlib.util.find_spec("fairseq") is not None UNSUPPORTED_ON_WINDOWS = {"code_eval"} _on_windows = os.name == "nt" REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"} _has_transformers = importlib.util.find_spec("transformers") is not None def skip_if_metric_requires_fairseq(test_case): @wraps(test_case) def wrapper(self, metric_name): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"') else: test_case(self, metric_name) return wrapper def skip_if_metric_requires_transformers(test_case): @wraps(test_case) def wrapper(self, metric_name): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"') else: test_case(self, metric_name) return wrapper def skip_on_windows_if_not_windows_compatible(test_case): @wraps(test_case) def wrapper(self, metric_name): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"') else: test_case(self, metric_name) return wrapper def get_local_metric_names(): metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names()) @for_all_test_methods( skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible ) @local class LocalMetricTest(parameterized.TestCase): INTENSIVE_CALLS_PATCHER = {} metric_name = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning") @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning") def test_load_metric(self, metric_name): doctest.ELLIPSIS_MARKER = "[...]" metric_module = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path ) metric = datasets.load.import_main_class(metric_module.__name__, dataset=False) # check parameters parameters = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(metric_name, metric_module.__name__): with self.use_local_metrics(): try: results = doctest.testmod(metric_module, verbose=True, raise_on_error=True) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed, 0) 
self.assertGreater(results.attempted, 1) @slow def test_load_real_metric(self, metric_name): doctest.ELLIPSIS_MARKER = "[...]" metric_module = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path ) # run doctest with self.use_local_metrics(): results = doctest.testmod(metric_module, verbose=True, raise_on_error=True) self.assertEqual(results.failed, 0) self.assertGreater(results.attempted, 1) @contextmanager def patch_intensive_calls(self, metric_name, module_name): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name): yield else: yield @contextmanager def use_local_metrics(self): def load_local_metric(metric_name, *args, **kwargs): return load_metric(os.path.join("metrics", metric_name), *args, **kwargs) with patch("datasets.load_metric") as mock_load_metric: mock_load_metric.side_effect = load_local_metric yield @classmethod def register_intensive_calls_patcher(cls, metric_name): def wrapper(patcher): patcher = contextmanager(patcher) cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher return patcher return wrapper # Metrics intensive calls patchers # -------------------------------- @LocalMetricTest.register_intensive_calls_patcher("bleurt") def patch_bleurt(module_name): import tensorflow.compat.v1 as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv", "", "") # handle pytest cli flags class MockedPredictor(Predictor): def predict(self, input_dict): assert len(input_dict["input_ids"]) == 2 return np.array([1.03, 1.04]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor") as mock_create_predictor: mock_create_predictor.return_value = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore") def patch_bertscore(module_name): import torch def bert_cos_score_idf(model, refs, *args, **kwargs): return torch.tensor([[1.0, 1.0, 1.0]] * len(refs)) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model"), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: mock_bert_cos_score_idf.side_effect = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet") def patch_comet(module_name): def load_from_checkpoint(model_path): class Model: def predict(self, data, *args, **kwargs): assert len(data) == 2 scores = [0.19, 0.92] return scores, sum(scores) / len(scores) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model") as mock_download_model: mock_download_model.return_value = None with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint: mock_load_from_checkpoint.side_effect = load_from_checkpoint yield def test_seqeval_raises_when_incorrect_scheme(): metric = load_metric(os.path.join("metrics", "seqeval")) wrong_scheme = "ERROR" error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(ValueError, match=re.escape(error_message)): metric.compute(predictions=[], references=[], scheme=wrong_scheme)
datasets/tests/test_metric_common.py/0
{ "file_path": "datasets/tests/test_metric_common.py", "repo_id": "datasets", "token_count": 3144 }
85
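New patchers can be registered the same way the bleurt, bertscore and comet ones are; the snippet below is a hypothetical example for a made-up metric name, using requests.get only as a stand-in for whatever expensive call the metric would actually perform.

# Hypothetical patcher for a made-up metric called "my_heavy_metric": it must be a
# generator (register_intensive_calls_patcher wraps it in contextmanager) and should
# replace the metric's expensive calls with cheap stubs while its doctest runs.
from unittest.mock import patch


@LocalMetricTest.register_intensive_calls_patcher("my_heavy_metric")
def patch_my_heavy_metric(module_name):
    # requests.get is only a stand-in target for this sketch, e.g. a checkpoint download.
    with patch("requests.get") as mock_get:
        mock_get.return_value = None
        yield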
import asyncio import importlib.metadata import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False) _run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True) _run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio require_sndfile = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam require_beam = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility require_dill_gt_0_3_2 = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows require_not_windows = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def require_faiss(test_case): """ Decorator marking a test that requires Faiss. These tests are skipped when Faiss isn't installed. """ try: import faiss # noqa except ImportError: test_case = unittest.skip("test requires faiss")(test_case) return test_case def require_regex(test_case): """ Decorator marking a test that requires regex. These tests are skipped when Regex isn't installed. """ try: import regex # noqa except ImportError: test_case = unittest.skip("test requires regex")(test_case) return test_case def require_elasticsearch(test_case): """ Decorator marking a test that requires ElasticSearch. These tests are skipped when ElasticSearch isn't installed. """ try: import elasticsearch # noqa except ImportError: test_case = unittest.skip("test requires elasticsearch")(test_case) return test_case def require_sqlalchemy(test_case): """ Decorator marking a test that requires SQLAlchemy. These tests are skipped when SQLAlchemy isn't installed. """ try: import sqlalchemy # noqa except ImportError: test_case = unittest.skip("test requires sqlalchemy")(test_case) return test_case def require_torch(test_case): """ Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. 
""" if not config.TORCH_AVAILABLE: test_case = unittest.skip("test requires PyTorch")(test_case) return test_case def require_polars(test_case): """ Decorator marking a test that requires Polars. These tests are skipped when Polars isn't installed. """ if not config.POLARS_AVAILABLE: test_case = unittest.skip("test requires Polars")(test_case) return test_case def require_tf(test_case): """ Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed. """ if not config.TF_AVAILABLE: test_case = unittest.skip("test requires TensorFlow")(test_case) return test_case def require_jax(test_case): """ Decorator marking a test that requires JAX. These tests are skipped when JAX isn't installed. """ if not config.JAX_AVAILABLE: test_case = unittest.skip("test requires JAX")(test_case) return test_case def require_pil(test_case): """ Decorator marking a test that requires Pillow. These tests are skipped when Pillow isn't installed. """ if not config.PIL_AVAILABLE: test_case = unittest.skip("test requires Pillow")(test_case) return test_case def require_transformers(test_case): """ Decorator marking a test that requires transformers. These tests are skipped when transformers isn't installed. """ try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers")(test_case) else: return test_case def require_tiktoken(test_case): """ Decorator marking a test that requires tiktoken. These tests are skipped when transformers isn't installed. """ try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(test_case) else: return test_case def require_spacy(test_case): """ Decorator marking a test that requires spacy. These tests are skipped when they aren't installed. """ try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(test_case) else: return test_case def require_spacy_model(model): """ Decorator marking a test that requires a spacy model. These tests are skipped when they aren't installed. """ def _require_spacy_model(test_case): try: import spacy # noqa F401 spacy.load(model) except ImportError: return unittest.skip("test requires spacy")(test_case) except OSError: return unittest.skip("test requires spacy model '{}'".format(model))(test_case) else: return test_case return _require_spacy_model def require_pyspark(test_case): """ Decorator marking a test that requires pyspark. These tests are skipped when pyspark isn't installed. """ try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(test_case) else: return test_case def require_joblibspark(test_case): """ Decorator marking a test that requires joblibspark. These tests are skipped when pyspark isn't installed. """ try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(test_case) else: return test_case def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if not _run_slow_tests or _run_slow_tests == 0: test_case = unittest.skip("test is slow")(test_case) return test_case def local(test_case): """ Decorator marking a test as local Local tests are run by default. Set the RUN_LOCAL environment variable to a falsy value to not run them. 
""" if not _run_local_tests or _run_local_tests == 0: test_case = unittest.skip("test is local")(test_case) return test_case def packaged(test_case): """ Decorator marking a test as packaged Packaged tests are run by default. Set the RUN_PACKAGED environment variable to a falsy value to not run them. """ if not _run_packaged_tests or _run_packaged_tests == 0: test_case = unittest.skip("test is packaged")(test_case) return test_case def remote(test_case): """ Decorator marking a test as one that relies on GitHub or the Hugging Face Hub. Remote tests are skipped by default. Set the RUN_REMOTE environment variable to a falsy value to not run them. """ if not _run_remote_tests or _run_remote_tests == 0: test_case = unittest.skip("test requires remote")(test_case) return test_case def for_all_test_methods(*decorators): def decorate(cls): for name, fn in cls.__dict__.items(): if callable(fn) and name.startswith("test"): for decorator in decorators: fn = decorator(fn) setattr(cls, name, fn) return cls return decorate class RequestWouldHangIndefinitelyError(Exception): pass class OfflineSimulationMode(Enum): CONNECTION_FAILS = 0 CONNECTION_TIMES_OUT = 1 HF_DATASETS_OFFLINE_SET_TO_1 = 2 @contextmanager def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16): """ Simulate offline mode. There are three offline simulatiom modes: CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call. Connection errors are created by mocking socket.socket CONNECTION_TIMES_OUT: the connection hangs until it times out. The default timeout value is low (1e-16) to speed up the tests. Timeout errors are created by mocking requests.request HF_DATASETS_OFFLINE_SET_TO_1: the HF_DATASETS_OFFLINE environment variable is set to 1. This makes the http/ftp calls of the library instantly fail and raise an OfflineModeEmabled error. """ online_request = requests.Session().request def timeout_request(session, method, url, **kwargs): # Change the url to an invalid url so that the connection hangs invalid_url = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." 
) kwargs["timeout"] = timeout try: return online_request(method, invalid_url, **kwargs) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier e.request.url = url max_retry_error = e.args[0] max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),) e.args = (max_retry_error,) raise def raise_connection_error(session, prepared_request, **kwargs): raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send", raise_connection_error): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request", timeout_request): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE", True): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def set_current_working_directory_to_temp_dir(*args, **kwargs): original_working_dir = str(Path().resolve()) with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir: try: os.chdir(tmp_dir) yield finally: os.chdir(original_working_dir) @contextmanager def assert_arrow_memory_increases(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def assert_arrow_memory_doesnt_increase(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def is_rng_equal(rng1, rng2): return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist() def xfail_if_500_502_http_error(func): import decorator from requests.exceptions import HTTPError def _wrapper(func, *args, **kwargs): try: return func(*args, **kwargs) except HTTPError as err: if str(err).startswith("500") or str(err).startswith("502"): pytest.xfail(str(err)) raise err return decorator.decorator(_wrapper, func) # --- distributed testing functions --- # # copied from transformers # originally adapted from https://stackoverflow.com/a/59041913/9201239 class _RunOutput: def __init__(self, returncode, stdout, stderr): self.returncode = returncode self.stdout = stdout self.stderr = stderr async def _read_stream(stream, callback): while True: line = await stream.readline() if line: callback(line) else: break async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: if echo: print("\nRunning: ", " ".join(cmd)) p = await asyncio.create_subprocess_exec( cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) out = [] err = [] def tee(line, sink, pipe, label=""): line = line.decode("utf-8").rstrip() sink.append(line) if not quiet: print(label, line, file=pipe) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")), _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")), ], timeout=timeout, ) return _RunOutput(await p.wait(), out, err) def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: loop = asyncio.get_event_loop() result = loop.run_until_complete( _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) ) cmd_str = " ".join(cmd) if result.returncode > 0: stderr = "\n".join(result.stderr) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output.") return result def pytest_xdist_worker_id(): """ Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0 if `-n 1` or `pytest-xdist` isn't being used. """ worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") worker = re.sub(r"^gw", "", worker, 0, re.M) return int(worker) def get_torch_dist_unique_port(): """ Returns a port number that can be fed to `torchrun`'s `--master_port` argument. Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same port at once. """ port = 29500 uniq_delta = pytest_xdist_worker_id() return port + uniq_delta
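# --- Editor's illustrative sketch (not part of the original datasets test suite) --- #
# A hypothetical test module showing how the helpers above are typically combined.
# The relative import path and the test names are assumptions for illustration only.
import pytest
import requests

from .utils import OfflineSimulationMode, offline, require_torch


@require_torch  # skipped automatically when PyTorch isn't installed
def test_with_format_torch():
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [1, 2, 3]}).with_format("torch")
    assert ds.format["type"] == "torch"


def test_offline_simulation():
    # offline() patches `requests` so that every network call fails immediately.
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")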
datasets/tests/utils.py/0
{ "file_path": "datasets/tests/utils.py", "repo_id": "datasets", "token_count": 6422 }
86
<jupyter_start><jupyter_text>Unit 8: Proximal Policy Gradient (PPO) with PyTorch 🤖In this notebook, you'll learn to **code your PPO agent from scratch with PyTorch using CleanRL implementation as model**.To test its robustness, we're going to train it in:- [LunarLander-v2 🚀](https://www.gymlibrary.dev/environments/box2d/lunar_lander/) ⬇️ Here is an example of what you will achieve. ⬇️<jupyter_code>%%html <video controls autoplay><source src="https://huggingface.co/sb3/ppo-LunarLander-v2/resolve/main/replay.mp4" type="video/mp4"></video><jupyter_output><empty_output><jupyter_text>We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to **code your PPO agent from scratch using PyTorch**.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning CourseIn this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments** Don’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 Study [PPO by reading Unit 8](https://huggingface.co/deep-rl-course/unit8/introduction) 🤗 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push one model, we don't ask for a minimal result but we **advise you to try different hyperparameters settings to get better results**.If you don't find your model, **go to the bottom of the page and click on the refresh button**For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Create a virtual display 🔽During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames). 
Hence the following cell will install the librairies and create and run a virtual screen 🖥<jupyter_code>!pip install setuptools==65.5.0 %%capture !apt install python-opengl !apt install ffmpeg !apt install xvfb !apt install swig cmake !pip install pyglet==1.5 !pip3 install pyvirtualdisplay # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install dependencies 🔽For this exercise, we use `gym==0.22`.<jupyter_code>!pip install gym==0.22 !pip install imageio-ffmpeg !pip install huggingface_hub !pip install gym[box2d]==0.22<jupyter_output><empty_output><jupyter_text>Let's code PPO from scratch with Costa Huang tutorial- For the core implementation of PPO we're going to use the excellent [Costa Huang](https://costa.sh/) tutorial.- In addition to the tutorial, to go deeper you can read the 37 core implementation details: https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/👉 The video tutorial: https://youtu.be/MEt6rrxH8W4<jupyter_code>from IPython.display import HTML HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/MEt6rrxH8W4" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')<jupyter_output><empty_output><jupyter_text>- The best is to code first on the cell below, this way, if you kill the machine **you don't loose the implementation**.<jupyter_code>### Your code here:<jupyter_output><empty_output><jupyter_text>Add the Hugging Face Integration 🤗- In order to push our model to the Hub, we need to define a function `package_to_hub` - Add dependencies we need to push our model to the Hub<jupyter_code>from huggingface_hub import HfApi, upload_folder from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import tempfile import json import shutil import imageio from wasabi import Printer msg = Printer()<jupyter_output><empty_output><jupyter_text>- Add new argument in `parse_args()` function to define the repo-id where we want to push the model.<jupyter_code># Adding HuggingFace argument parser.add_argument("--repo-id", type=str, default="ThomasSimonini/ppo-CartPole-v1", help="id of the model repository from the Hugging Face Hub {username/repo_name}")<jupyter_output><empty_output><jupyter_text>- Next, we add the methods needed to push the model to the Hub- These methods will: - `_evalutate_agent()`: evaluate the agent. - `_generate_model_card()`: generate the model card of your agent. - `_record_video()`: record a video of your agent.<jupyter_code>def package_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30, commit_message="Push agent to the Hub", token= None, logs=None ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. 
This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: trained model :param eval_env: environment used to evaluate the agent :param fps: number of fps for rendering the video :param commit_message: commit message :param logs: directory on local machine of tensorboard logs you'd like to upload """ msg.info( "This function will save, evaluate, generate a video of your agent, " "create a model card and push everything to the hub. " "It might take up to 1min. \n " "This is a work in progress: if you encounter a bug, please open an issue." ) # Step 1: Clone or create the repo repo_url = HfApi().create_repo( repo_id=repo_id, token=token, private=False, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = Path(tmpdirname) # Step 2: Save the model torch.save(model.state_dict(), tmpdirname / "model.pt") # Step 3: Evaluate the model and build JSON mean_reward, std_reward = _evaluate_agent(eval_env, 10, model) # First get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters.env_id, "mean_reward": mean_reward, "std_reward": std_reward, "n_evaluation_episodes": 10, "eval_datetime": eval_form_datetime, } # Write a JSON file with open(tmpdirname / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 4: Generate a video video_path = tmpdirname / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # Step 5: Generate the model card generated_model_card, metadata = _generate_model_card("PPO", hyperparameters.env_id, mean_reward, std_reward, hyperparameters) _save_model_card(tmpdirname, generated_model_card, metadata) # Step 6: Add logs if needed if logs: _add_logdir(tmpdirname, Path(logs)) msg.info(f"Pushing repo {repo_id} to the Hugging Face Hub") repo_url = upload_folder( repo_id=repo_id, folder_path=tmpdirname, path_in_repo="", commit_message=commit_message, token=token, ) msg.info(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") return repo_url def _evaluate_agent(env, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. 
:param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 while done is False: state = torch.Tensor(state).to(device) action, _, _, _ = policy.get_action_and_value(state) new_state, reward, done, info = env.step(action.cpu().numpy()) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward def record_video(env, policy, out_directory, fps=30): images = [] done = False state = env.reset() img = env.render(mode='rgb_array') images.append(img) while not done: state = torch.Tensor(state).to(device) # Take the action (index) that have the maximum expected future reward given that state action, _, _, _ = policy.get_action_and_value(state) state, reward, done, info = env.step(action.cpu().numpy()) # We directly put next_state = state for recording logic img = env.render(mode='rgb_array') images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def _generate_model_card(model_name, env_id, mean_reward, std_reward, hyperparameters): """ Generate the model card for the Hub :param model_name: name of the model :env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent :hyperparameters: training arguments """ # Step 1: Select the tags metadata = generate_metadata(model_name, env_id, mean_reward, std_reward) # Transform the hyperparams namespace to string converted_dict = vars(hyperparameters) converted_str = str(converted_dict) converted_str = converted_str.split(", ") converted_str = '\n'.join(converted_str) # Step 2: Generate the model card model_card = f""" # PPO Agent Playing {env_id} This is a trained model of a PPO agent playing {env_id}. # Hyperparameters ```python {converted_str} ``` """ return model_card, metadata def generate_metadata(model_name, env_id, mean_reward, std_reward): """ Define the tags for the model card :param model_name: name of the model :param env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent """ metadata = {} metadata["tags"] = [ env_id, "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course" ] # Add metrics eval = metadata_eval_result( model_pretty_name=model_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_id, dataset_id=env_id, ) # Merges both dictionaries metadata = {**metadata, **eval} return metadata def _save_model_card(local_path, generated_model_card, metadata): """Saves a model card for the repository. 
:param local_path: repository directory :param generated_model_card: model card generated by _generate_model_card() :param metadata: metadata """ readme_path = local_path / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = generated_model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) def _add_logdir(local_path: Path, logdir: Path): """Adds a logdir to the repository. :param local_path: repository directory :param logdir: logdir directory """ if logdir.exists() and logdir.is_dir(): # Add the logdir to the repository under new dir called logs repo_logdir = local_path / "logs" # Delete current logs if they exist if repo_logdir.exists(): shutil.rmtree(repo_logdir) # Copy logdir into repo logdir shutil.copytree(logdir, repo_logdir)<jupyter_output><empty_output><jupyter_text>- Finally, we call this function at the end of the PPO training<jupyter_code># Create the evaluation environment eval_env = gym.make(args.env_id) package_to_hub(repo_id = args.repo_id, model = agent, # The model we want to save hyperparameters = args, eval_env = gym.make(args.env_id), logs= f"runs/{run_name}", )<jupyter_output><empty_output><jupyter_text>- Here's what look the ppo.py final file<jupyter_code># docs and experiment results can be found at https://docs.cleanrl.dev/rl-algorithms/ppo/#ppopy import argparse import os import random import time from distutils.util import strtobool import gym import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.distributions.categorical import Categorical from torch.utils.tensorboard import SummaryWriter from huggingface_hub import HfApi, upload_folder from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import tempfile import json import shutil import imageio from wasabi import Printer msg = Printer() def parse_args(): # fmt: off parser = argparse.ArgumentParser() parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"), help="the name of this experiment") parser.add_argument("--seed", type=int, default=1, help="seed of the experiment") parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, `torch.backends.cudnn.deterministic=False`") parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="if toggled, cuda will be enabled by default") parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="if toggled, this experiment will be tracked with Weights and Biases") parser.add_argument("--wandb-project-name", type=str, default="cleanRL", help="the wandb's project name") parser.add_argument("--wandb-entity", type=str, default=None, help="the entity (team) of wandb's project") parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True, help="weather to capture videos of the agent performances (check out `videos` folder)") # Algorithm specific arguments parser.add_argument("--env-id", type=str, default="CartPole-v1", help="the id of the environment") parser.add_argument("--total-timesteps", type=int, default=50000, help="total timesteps of the experiments") parser.add_argument("--learning-rate", type=float, default=2.5e-4, help="the learning rate 
of the optimizer") parser.add_argument("--num-envs", type=int, default=4, help="the number of parallel game environments") parser.add_argument("--num-steps", type=int, default=128, help="the number of steps to run in each environment per policy rollout") parser.add_argument("--anneal-lr", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggle learning rate annealing for policy and value networks") parser.add_argument("--gae", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Use GAE for advantage computation") parser.add_argument("--gamma", type=float, default=0.99, help="the discount factor gamma") parser.add_argument("--gae-lambda", type=float, default=0.95, help="the lambda for the general advantage estimation") parser.add_argument("--num-minibatches", type=int, default=4, help="the number of mini-batches") parser.add_argument("--update-epochs", type=int, default=4, help="the K epochs to update the policy") parser.add_argument("--norm-adv", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggles advantages normalization") parser.add_argument("--clip-coef", type=float, default=0.2, help="the surrogate clipping coefficient") parser.add_argument("--clip-vloss", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True, help="Toggles whether or not to use a clipped loss for the value function, as per the paper.") parser.add_argument("--ent-coef", type=float, default=0.01, help="coefficient of the entropy") parser.add_argument("--vf-coef", type=float, default=0.5, help="coefficient of the value function") parser.add_argument("--max-grad-norm", type=float, default=0.5, help="the maximum norm for the gradient clipping") parser.add_argument("--target-kl", type=float, default=None, help="the target KL divergence threshold") # Adding HuggingFace argument parser.add_argument("--repo-id", type=str, default="ThomasSimonini/ppo-CartPole-v1", help="id of the model repository from the Hugging Face Hub {username/repo_name}") args = parser.parse_args() args.batch_size = int(args.num_envs * args.num_steps) args.minibatch_size = int(args.batch_size // args.num_minibatches) # fmt: on return args def package_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30, commit_message="Push agent to the Hub", token= None, logs=None ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the hub :param repo_id: id of the model repository from the Hugging Face Hub :param model: trained model :param eval_env: environment used to evaluate the agent :param fps: number of fps for rendering the video :param commit_message: commit message :param logs: directory on local machine of tensorboard logs you'd like to upload """ msg.info( "This function will save, evaluate, generate a video of your agent, " "create a model card and push everything to the hub. " "It might take up to 1min. \n " "This is a work in progress: if you encounter a bug, please open an issue." 
) # Step 1: Clone or create the repo repo_url = HfApi().create_repo( repo_id=repo_id, token=token, private=False, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = Path(tmpdirname) # Step 2: Save the model torch.save(model.state_dict(), tmpdirname / "model.pt") # Step 3: Evaluate the model and build JSON mean_reward, std_reward = _evaluate_agent(eval_env, 10, model) # First get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters.env_id, "mean_reward": mean_reward, "std_reward": std_reward, "n_evaluation_episodes": 10, "eval_datetime": eval_form_datetime, } # Write a JSON file with open(tmpdirname / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 4: Generate a video video_path = tmpdirname / "replay.mp4" record_video(eval_env, model, video_path, video_fps) # Step 5: Generate the model card generated_model_card, metadata = _generate_model_card("PPO", hyperparameters.env_id, mean_reward, std_reward, hyperparameters) _save_model_card(tmpdirname, generated_model_card, metadata) # Step 6: Add logs if needed if logs: _add_logdir(tmpdirname, Path(logs)) msg.info(f"Pushing repo {repo_id} to the Hugging Face Hub") repo_url = upload_folder( repo_id=repo_id, folder_path=tmpdirname, path_in_repo="", commit_message=commit_message, token=token, ) msg.info(f"Your model is pushed to the Hub. You can view your model here: {repo_url}") return repo_url def _evaluate_agent(env, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. :param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 while done is False: state = torch.Tensor(state).to(device) action, _, _, _ = policy.get_action_and_value(state) new_state, reward, done, info = env.step(action.cpu().numpy()) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward def record_video(env, policy, out_directory, fps=30): images = [] done = False state = env.reset() img = env.render(mode='rgb_array') images.append(img) while not done: state = torch.Tensor(state).to(device) # Take the action (index) that have the maximum expected future reward given that state action, _, _, _ = policy.get_action_and_value(state) state, reward, done, info = env.step(action.cpu().numpy()) # We directly put next_state = state for recording logic img = env.render(mode='rgb_array') images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def _generate_model_card(model_name, env_id, mean_reward, std_reward, hyperparameters): """ Generate the model card for the Hub :param model_name: name of the model :env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent :hyperparameters: training arguments """ # Step 1: Select the tags metadata = generate_metadata(model_name, env_id, mean_reward, std_reward) # Transform the hyperparams namespace to string converted_dict = vars(hyperparameters) converted_str = str(converted_dict) converted_str = converted_str.split(", ") converted_str = '\n'.join(converted_str) # Step 
2: Generate the model card model_card = f""" # PPO Agent Playing {env_id} This is a trained model of a PPO agent playing {env_id}. # Hyperparameters ```python {converted_str} ``` """ return model_card, metadata def generate_metadata(model_name, env_id, mean_reward, std_reward): """ Define the tags for the model card :param model_name: name of the model :param env_id: name of the environment :mean_reward: mean reward of the agent :std_reward: standard deviation of the mean reward of the agent """ metadata = {} metadata["tags"] = [ env_id, "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course" ] # Add metrics eval = metadata_eval_result( model_pretty_name=model_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_id, dataset_id=env_id, ) # Merges both dictionaries metadata = {**metadata, **eval} return metadata def _save_model_card(local_path, generated_model_card, metadata): """Saves a model card for the repository. :param local_path: repository directory :param generated_model_card: model card generated by _generate_model_card() :param metadata: metadata """ readme_path = local_path / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = generated_model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) def _add_logdir(local_path: Path, logdir: Path): """Adds a logdir to the repository. :param local_path: repository directory :param logdir: logdir directory """ if logdir.exists() and logdir.is_dir(): # Add the logdir to the repository under new dir called logs repo_logdir = local_path / "logs" # Delete current logs if they exist if repo_logdir.exists(): shutil.rmtree(repo_logdir) # Copy logdir into repo logdir shutil.copytree(logdir, repo_logdir) def make_env(env_id, seed, idx, capture_video, run_name): def thunk(): env = gym.make(env_id) env = gym.wrappers.RecordEpisodeStatistics(env) if capture_video: if idx == 0: env = gym.wrappers.RecordVideo(env, f"videos/{run_name}") env.seed(seed) env.action_space.seed(seed) env.observation_space.seed(seed) return env return thunk def layer_init(layer, std=np.sqrt(2), bias_const=0.0): torch.nn.init.orthogonal_(layer.weight, std) torch.nn.init.constant_(layer.bias, bias_const) return layer class Agent(nn.Module): def __init__(self, envs): super().__init__() self.critic = nn.Sequential( layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64)), nn.Tanh(), layer_init(nn.Linear(64, 64)), nn.Tanh(), layer_init(nn.Linear(64, 1), std=1.0), ) self.actor = nn.Sequential( layer_init(nn.Linear(np.array(envs.single_observation_space.shape).prod(), 64)), nn.Tanh(), layer_init(nn.Linear(64, 64)), nn.Tanh(), layer_init(nn.Linear(64, envs.single_action_space.n), std=0.01), ) def get_value(self, x): return self.critic(x) def get_action_and_value(self, x, action=None): logits = self.actor(x) probs = Categorical(logits=logits) if action is None: action = probs.sample() return action, probs.log_prob(action), probs.entropy(), self.critic(x) if __name__ == "__main__": args = parse_args() run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}" if args.track: import wandb wandb.init( project=args.wandb_project_name, 
entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=run_name, monitor_gym=True, save_code=True, ) writer = SummaryWriter(f"runs/{run_name}") writer.add_text( "hyperparameters", "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])), ) # TRY NOT TO MODIFY: seeding random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = args.torch_deterministic device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") # env setup envs = gym.vector.SyncVectorEnv( [make_env(args.env_id, args.seed + i, i, args.capture_video, run_name) for i in range(args.num_envs)] ) assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported" agent = Agent(envs).to(device) optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5) # ALGO Logic: Storage setup obs = torch.zeros((args.num_steps, args.num_envs) + envs.single_observation_space.shape).to(device) actions = torch.zeros((args.num_steps, args.num_envs) + envs.single_action_space.shape).to(device) logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device) rewards = torch.zeros((args.num_steps, args.num_envs)).to(device) dones = torch.zeros((args.num_steps, args.num_envs)).to(device) values = torch.zeros((args.num_steps, args.num_envs)).to(device) # TRY NOT TO MODIFY: start the game global_step = 0 start_time = time.time() next_obs = torch.Tensor(envs.reset()).to(device) next_done = torch.zeros(args.num_envs).to(device) num_updates = args.total_timesteps // args.batch_size for update in range(1, num_updates + 1): # Annealing the rate if instructed to do so. if args.anneal_lr: frac = 1.0 - (update - 1.0) / num_updates lrnow = frac * args.learning_rate optimizer.param_groups[0]["lr"] = lrnow for step in range(0, args.num_steps): global_step += 1 * args.num_envs obs[step] = next_obs dones[step] = next_done # ALGO LOGIC: action logic with torch.no_grad(): action, logprob, _, value = agent.get_action_and_value(next_obs) values[step] = value.flatten() actions[step] = action logprobs[step] = logprob # TRY NOT TO MODIFY: execute the game and log data. 
next_obs, reward, done, info = envs.step(action.cpu().numpy()) rewards[step] = torch.tensor(reward).to(device).view(-1) next_obs, next_done = torch.Tensor(next_obs).to(device), torch.Tensor(done).to(device) for item in info: if "episode" in item.keys(): print(f"global_step={global_step}, episodic_return={item['episode']['r']}") writer.add_scalar("charts/episodic_return", item["episode"]["r"], global_step) writer.add_scalar("charts/episodic_length", item["episode"]["l"], global_step) break # bootstrap value if not done with torch.no_grad(): next_value = agent.get_value(next_obs).reshape(1, -1) if args.gae: advantages = torch.zeros_like(rewards).to(device) lastgaelam = 0 for t in reversed(range(args.num_steps)): if t == args.num_steps - 1: nextnonterminal = 1.0 - next_done nextvalues = next_value else: nextnonterminal = 1.0 - dones[t + 1] nextvalues = values[t + 1] delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t] advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam returns = advantages + values else: returns = torch.zeros_like(rewards).to(device) for t in reversed(range(args.num_steps)): if t == args.num_steps - 1: nextnonterminal = 1.0 - next_done next_return = next_value else: nextnonterminal = 1.0 - dones[t + 1] next_return = returns[t + 1] returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return advantages = returns - values # flatten the batch b_obs = obs.reshape((-1,) + envs.single_observation_space.shape) b_logprobs = logprobs.reshape(-1) b_actions = actions.reshape((-1,) + envs.single_action_space.shape) b_advantages = advantages.reshape(-1) b_returns = returns.reshape(-1) b_values = values.reshape(-1) # Optimizing the policy and value network b_inds = np.arange(args.batch_size) clipfracs = [] for epoch in range(args.update_epochs): np.random.shuffle(b_inds) for start in range(0, args.batch_size, args.minibatch_size): end = start + args.minibatch_size mb_inds = b_inds[start:end] _, newlogprob, entropy, newvalue = agent.get_action_and_value(b_obs[mb_inds], b_actions.long()[mb_inds]) logratio = newlogprob - b_logprobs[mb_inds] ratio = logratio.exp() with torch.no_grad(): # calculate approx_kl http://joschu.net/blog/kl-approx.html old_approx_kl = (-logratio).mean() approx_kl = ((ratio - 1) - logratio).mean() clipfracs += [((ratio - 1.0).abs() > args.clip_coef).float().mean().item()] mb_advantages = b_advantages[mb_inds] if args.norm_adv: mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8) # Policy loss pg_loss1 = -mb_advantages * ratio pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef) pg_loss = torch.max(pg_loss1, pg_loss2).mean() # Value loss newvalue = newvalue.view(-1) if args.clip_vloss: v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2 v_clipped = b_values[mb_inds] + torch.clamp( newvalue - b_values[mb_inds], -args.clip_coef, args.clip_coef, ) v_loss_clipped = (v_clipped - b_returns[mb_inds]) ** 2 v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped) v_loss = 0.5 * v_loss_max.mean() else: v_loss = 0.5 * ((newvalue - b_returns[mb_inds]) ** 2).mean() entropy_loss = entropy.mean() loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm) optimizer.step() if args.target_kl is not None: if approx_kl > args.target_kl: break y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy() var_y = np.var(y_true) 
explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y # TRY NOT TO MODIFY: record rewards for plotting purposes writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]["lr"], global_step) writer.add_scalar("losses/value_loss", v_loss.item(), global_step) writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step) writer.add_scalar("losses/entropy", entropy_loss.item(), global_step) writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), global_step) writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step) writer.add_scalar("losses/clipfrac", np.mean(clipfracs), global_step) writer.add_scalar("losses/explained_variance", explained_var, global_step) print("SPS:", int(global_step / (time.time() - start_time))) writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step) envs.close() writer.close() # Create the evaluation environment eval_env = gym.make(args.env_id) package_to_hub(repo_id = args.repo_id, model = agent, # The model we want to save hyperparameters = args, eval_env = gym.make(args.env_id), logs= f"runs/{run_name}", )<jupyter_output><empty_output><jupyter_text>To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**- Copy the token - Run the cell below and paste the token<jupyter_code>from huggingface_hub import notebook_login notebook_login() !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` Let's start the training 🔥- ⚠️ ⚠️ ⚠️ Don't use **the same repo id with the one you used for the Unit 1** - Now that you've coded from scratch PPO and added the Hugging Face Integration, we're ready to start the training 🔥 - First, you need to copy all your code to a file you create called `ppo.py` - Now we just need to run this python script using `python .py` with the additional parameters we defined with `argparse`- You should modify more hyperparameters otherwise the training will not be super stable.<jupyter_code>!python ppo.py --env-id="LunarLander-v2" --repo-id="YOUR_REPO_ID" --total-timesteps=50000<jupyter_output><empty_output>
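Before launching the training, here is a tiny, self-contained illustration (an editor's sketch with toy tensors, not a notebook cell) of the clipped surrogate objective that the `pg_loss` term in `ppo.py` above computes:

```python
import torch

clip_coef = 0.2
advantages = torch.tensor([1.0, -0.5, 2.0])
logratio = torch.tensor([0.3, -0.2, 0.05])  # new log-prob minus old log-prob
ratio = logratio.exp()

# Pessimistic (clipped) policy objective, exactly as in the training loop above
pg_loss1 = -advantages * ratio
pg_loss2 = -advantages * torch.clamp(ratio, 1 - clip_coef, 1 + clip_coef)
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
print(pg_loss)  # a single scalar used for the gradient step
```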
deep-rl-class/notebooks/unit8/unit8_part1.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit8/unit8_part1.ipynb", "repo_id": "deep-rl-class", "token_count": 15492 }
87
# Quiz [[quiz]] The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: What is Reinforcement Learning? <details> <summary>Solution</summary> Reinforcement learning is a **framework for solving control tasks (also called decision problems)** by building agents that learn from the environment by interacting with it through trial and error and **receiving rewards (positive or negative) as unique feedback**. </details> ### Q2: Define the RL Loop <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rl-loop-ex.jpg" alt="Exercise RL Loop"/> At every step: - Our Agent receives ______ from the environment - Based on that ______ the Agent takes an ______ - Our Agent will move to the right - The Environment goes to a ______ - The Environment gives a ______ to the Agent <Question choices={[ { text: "an action a0, action a0, state s0, state s1, reward r1", explain: "At every step: Our Agent receives **state s0** from the environment. Based on that **state s0** the Agent takes an **action a0**. Our Agent will move to the right. The Environment goes to a **new state s1**. The Environment gives **a reward r1** to the Agent." }, { text: "state s0, state s0, action a0, new state s1, reward r1", explain: "", correct: true }, { text: "a state s0, state s0, action a0, state s1, action a1", explain: "At every step: Our Agent receives **state s0** from the environment. Based on that **state s0** the Agent takes an **action a0**. Our Agent will move to the right. The Environment goes to a **new state s1**. The Environment gives **a reward r1** to the Agent." } ]} /> ### Q3: What's the difference between a state and an observation? <Question choices={[ { text: "The state is a complete description of the state of the world (there is no hidden information)", explain: "", correct: true }, { text: "The state is a partial description of the state", explain: "" }, { text: "The observation is a complete description of the state of the world (there is no hidden information)", explain: "" }, { text: "The observation is a partial description of the state", explain: "", correct: true }, { text: "We receive a state when we play with chess environment", explain: "Since we have access to the whole checkboard information.", correct: true }, { text: "We receive an observation when we play with chess environment", explain: "Since we have access to the whole checkboard information." }, { text: "We receive a state when we play with Super Mario Bros", explain: "We only see a part of the level close to the player, so we receive an observation." }, { text: "We receive an observation when we play with Super Mario Bros", explain: "We only see a part of the level close to the player.", correct: true } ]} /> ### Q4: A task is an instance of a Reinforcement Learning problem. What are the two types of tasks? <Question choices={[ { text: "Episodic", explain: "In Episodic task, we have a starting point and an ending point (a terminal state). This creates an episode: a list of States, Actions, Rewards, and new States. 
For instance, think about Super Mario Bros: an episode begins at the launch of a new Mario level and ends when you’re killed or when you reach the end of the level.",
    correct: true
  },
  {
    text: "Recursive",
    explain: ""
  },
  {
    text: "Adversarial",
    explain: ""
  },
  {
    text: "Continuing",
    explain: "Continuing tasks are tasks that continue forever (no terminal state). In this case, the agent must learn how to choose the best actions and simultaneously interact with the environment.",
    correct: true
  }
]}
/>

### Q5: What is the exploration/exploitation tradeoff?

<details>
<summary>Solution</summary>

In Reinforcement Learning, we need to **balance how much we explore the environment and how much we exploit what we know about the environment**.

- *Exploration* is exploring the environment by **trying random actions in order to find more information about the environment**.
- *Exploitation* is **exploiting known information to maximize the reward**.

A small code sketch of one simple way to balance the two (an epsilon-greedy rule) is shown at the end of this quiz.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">

</details>

### Q6: What is a policy?

<details>
<summary>Solution</summary>

- The Policy π **is the brain of our Agent**. It’s the function that tells us what action to take given the state we are in. So it defines the agent’s behavior at a given time.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_1.jpg" alt="Policy">

</details>

### Q7: What are value-based methods?

<details>
<summary>Solution</summary>

- Value-based methods are one of the main approaches for solving RL problems.
- In value-based methods, instead of training a policy function, **we train a value function that maps a state to the expected value of being at that state**.

</details>

### Q8: What are policy-based methods?

<details>
<summary>Solution</summary>

- In *Policy-Based Methods*, we learn a **policy function directly**.
- This policy function will **map from each state to the best corresponding action at that state**. Or a **probability distribution over the set of possible actions at that state**.

</details>

Congrats on finishing this quiz 🥳! If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge, but **do not worry**: during the course we'll go over these concepts again, and you'll **reinforce your theoretical knowledge with hands-on exercises**.
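To make the exploration/exploitation tradeoff from Q5 concrete, here is a minimal illustrative sketch (an editor's addition, not part of the course code) of the simplest way to balance the two, an epsilon-greedy rule:

```python
import random

def epsilon_greedy_action(q_values, epsilon=0.1):
    """With probability epsilon take a random action (explore), else the best known one (exploit)."""
    if random.random() < epsilon:
        return random.randrange(len(q_values))  # exploration
    return max(range(len(q_values)), key=lambda a: q_values[a])  # exploitation

print(epsilon_greedy_action([0.1, 0.5, 0.2]))  # usually returns 1, sometimes a random action
```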
deep-rl-class/units/en/unit1/quiz.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/quiz.mdx", "repo_id": "deep-rl-class", "token_count": 1866 }
88
# Q-Learning Recap [[q-learning-recap]]

*Q-Learning* **is the RL algorithm that**:

- Trains a *Q-function*, an **action-value function** encoded, in internal memory, by a *Q-table* **containing all the state-action pair values.**
- Given a state and action, our Q-function **will search its Q-table for the corresponding value.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q function" width="100%"/>

- When the training is done, **we have an optimal Q-function, or, equivalently, an optimal Q-table.**
- And if we **have an optimal Q-function**, we have an optimal policy, since we **know, for each state, the best action to take.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" width="100%"/>

But, in the beginning, our **Q-table is useless since it gives arbitrary values for each state-action pair (most of the time, we initialize the Q-table to 0 values)**. But, as we explore the environment and update our Q-table, it will give us a better and better approximation.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit2/q-learning.jpeg" alt="q-learning.jpeg" width="100%"/>

This is the Q-Learning pseudocode:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-Learning" width="100%"/>
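As a tiny illustration of the update step in this pseudocode (an editor's sketch with made-up numbers, not the course's implementation):

```python
import numpy as np

n_states, n_actions = 4, 2
q_table = np.zeros((n_states, n_actions))  # arbitrary initial values (here: zeros)

# One hypothetical transition (state, action, reward, next_state)
s, a, r, s_next = 0, 1, 1.0, 2
alpha, gamma = 0.1, 0.99  # learning rate and discount factor

# Temporal-difference update toward r + gamma * max_a' Q(s', a')
td_target = r + gamma * np.max(q_table[s_next])
q_table[s, a] += alpha * (td_target - q_table[s, a])

print(q_table[s, a])  # 0.1 after this single update
```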
deep-rl-class/units/en/unit2/q-learning-recap.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning-recap.mdx", "repo_id": "deep-rl-class", "token_count": 505 }
89
# Conclusion

**Congrats on finishing this unit**! There was a lot of information. And congrats on finishing the tutorial. You've just coded your first Deep Reinforcement Learning agent from scratch using PyTorch and shared it on the Hub 🥳.

Don't hesitate to iterate on this unit **by improving the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observations?).

In the next unit, **we're going to learn more about Unity MLAgents** by training agents in Unity environments. This way, you will be ready to participate in the **AI vs AI challenges where you'll train your agents to compete against other agents in a snowball fight and a soccer game.**

Sound fun? See you next time!

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have any feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit4/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 250 }
90
# The SnowballTarget Environment

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget.gif" alt="SnowballTarget"/>

SnowballTarget is an environment we created at Hugging Face using assets from [Kay Lousberg](https://kaylousberg.com/). We have an optional section at the end of this Unit **if you want to learn to use Unity and create your own environments**.

## The agent's Goal

The first agent you're going to train is called Julien the bear 🐻. Julien is trained **to hit targets with snowballs**.

The Goal in this environment is that Julien **hits as many targets as possible in the limited time** (1000 timesteps). To do that, it will need **to place itself correctly in relation to the target and shoot**.

In addition, to avoid "snowball spamming" (aka shooting a snowball every timestep), **Julien has a "cool off" system** (it needs to wait 0.5 seconds after shooting to be able to shoot again).

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/cooloffsystem.gif" alt="Cool Off System"/>
<figcaption>The agent needs to wait 0.5s before being able to shoot a snowball again</figcaption>
</figure>

## The reward function and the reward engineering problem

The reward function is simple. **The environment gives a +1 reward every time the agent's snowball hits a target**. Because the agent's Goal is to maximize the expected cumulative reward, **it will try to hit as many targets as possible**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget_reward.png" alt="Reward system"/>

We could have a more complex reward function (with a penalty to push the agent to go faster, for example). But when you design an environment, you need to avoid the *reward engineering problem*, which is designing a reward function so complex that it forces your agent to behave exactly as you want it to. Why? Because by doing that, **you might miss interesting strategies that the agent will find with a simpler reward function**.

In terms of code, it looks like this (a simplified Python sketch of this logic is also included at the end of this page):

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget-reward-code.png" alt="Reward"/>

## The observation space

Regarding observations, we don't use normal vision (frames), but **we use raycasts**.

Think of raycasts as lasers that will detect if they pass through an object.

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/raycasts.png" alt="Raycasts"/>
<figcaption>Source: <a href="https://github.com/Unity-Technologies/ml-agents">ML-Agents documentation</a></figcaption>
</figure>

In this environment, our agent has multiple sets of raycasts:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowball_target_raycasts.png" alt="Raycasts"/>

In addition to raycasts, the agent gets a "can I shoot" bool as an observation.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget-obs-code.png" alt="Obs"/>

## The action space

The action space is discrete:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget_action_space.png" alt="Action Space"/>
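The real environment is implemented in C# inside Unity (as shown in the screenshots above). The following Python pseudocode is only an editor's illustrative sketch of the reward and "cool off" logic described on this page, with assumed names:

```python
COOL_OFF_SECONDS = 0.5

class SnowballAgentSketch:
    def __init__(self):
        self.time_since_last_shot = COOL_OFF_SECONDS  # allowed to shoot at the start

    def step(self, wants_to_shoot, target_hit, dt):
        """Return (reward, can_shoot) for one timestep of duration dt."""
        reward = 0.0
        self.time_since_last_shot += dt
        can_shoot = self.time_since_last_shot >= COOL_OFF_SECONDS  # part of the observation
        if wants_to_shoot and can_shoot:
            self.time_since_last_shot = 0.0
            if target_hit:
                reward += 1.0  # +1 every time a snowball hits a target
        return reward, can_shoot
```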
deep-rl-class/units/en/unit5/snowball-target.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/snowball-target.mdx", "repo_id": "deep-rl-class", "token_count": 1019 }
91
# Additional Readings [[additional-readings]] These are **optional readings** if you want to go deeper. ## PPO Explained - [Towards Delivering a Coherent Self-Contained Explanation of Proximal Policy Optimization by Daniel Bick](https://fse.studenttheses.ub.rug.nl/25709/1/mAI_2021_BickD.pdf) - [What is the way to understand Proximal Policy Optimization Algorithm in RL?](https://stackoverflow.com/questions/46422845/what-is-the-way-to-understand-proximal-policy-optimization-algorithm-in-rl) - [Foundations of Deep RL Series, L4 TRPO and PPO by Pieter Abbeel](https://youtu.be/KjWF8VIMGiY) - [OpenAI PPO Blogpost](https://openai.com/blog/openai-baselines-ppo/) - [Spinning Up RL PPO](https://spinningup.openai.com/en/latest/algorithms/ppo.html) - [Paper Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347) ## PPO Implementation details - [The 37 Implementation Details of Proximal Policy Optimization](https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/) - [Part 1 of 3 — Proximal Policy Optimization Implementation: 11 Core Implementation Details](https://www.youtube.com/watch?v=MEt6rrxH8W4) ## Importance Sampling - [Importance Sampling Explained](https://youtu.be/C3p2wI4RAi8)
deep-rl-class/units/en/unit8/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 418 }
92
# Introduction [[introduction]]

One of the most critical tasks in Deep Reinforcement Learning is to **find a good set of training hyperparameters**.

<img src="https://raw.githubusercontent.com/optuna/optuna/master/docs/image/optuna-logo.png" alt="Optuna Logo"/>

[Optuna](https://optuna.org/) is a library that helps you automate the search. In this Unit, we'll study a **little bit of the theory behind automatic hyperparameter tuning**. We'll first try to manually optimize the hyperparameters of the DQN we studied in the last unit. We'll then **learn how to automate the search using Optuna**.
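To give a first taste of what that automation looks like, here is a minimal Optuna sketch (with a dummy objective; the real unit optimizes an actual DQN training run):

```python
import optuna

def objective(trial):
    # Sample candidate hyperparameters for this trial
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True)
    gamma = trial.suggest_float("gamma", 0.95, 0.9999)
    # In the real unit we would train a DQN with these values and return its mean reward;
    # here we return a dummy score so the example stays self-contained.
    return -(learning_rate - 1e-3) ** 2 - (gamma - 0.99) ** 2

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)
print(study.best_params)
```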
deep-rl-class/units/en/unitbonus2/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 156 }
93
import argparse import sys sys.path.append(".") from base_classes import IPAdapterTextToImageBenchmark # noqa: E402 IP_ADAPTER_CKPTS = { "runwayml/stable-diffusion-v1-5": ("h94/IP-Adapter", "ip-adapter_sd15.bin"), "stabilityai/stable-diffusion-xl-base-1.0": ("h94/IP-Adapter", "ip-adapter_sdxl.bin"), } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="runwayml/stable-diffusion-v1-5", choices=list(IP_ADAPTER_CKPTS.keys()), ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() args.ip_adapter_id = IP_ADAPTER_CKPTS[args.ckpt] benchmark_pipe = IPAdapterTextToImageBenchmark(args) args.ckpt = f"{args.ckpt} (IP-Adapter)" benchmark_pipe.benchmark(args)
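# --- Editor's note: example invocation (uses only the argparse flags defined above) --- #
#   python benchmark_ip_adapters.py \
#       --ckpt "stabilityai/stable-diffusion-xl-base-1.0" \
#       --batch_size 1 --num_inference_steps 30 --run_compile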
diffusers/benchmarks/benchmark_ip_adapters.py/0
{ "file_path": "diffusers/benchmarks/benchmark_ip_adapters.py", "repo_id": "diffusers", "token_count": 434 }
94
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Logging 🤗 Diffusers has a centralized logging system to easily manage the verbosity of the library. The default verbosity is set to `WARNING`. To change the verbosity level, use one of the direct setters. For instance, to change the verbosity to the `INFO` level. ```python import diffusers diffusers.logging.set_verbosity_info() ``` You can also use the environment variable `DIFFUSERS_VERBOSITY` to override the default verbosity. You can set it to one of the following: `debug`, `info`, `warning`, `error`, `critical`. For example: ```bash DIFFUSERS_VERBOSITY=error ./myprogram.py ``` Additionally, some `warnings` can be disabled by setting the environment variable `DIFFUSERS_NO_ADVISORY_WARNINGS` to a true value, like `1`. This disables any warning logged by [`logger.warning_advice`]. For example: ```bash DIFFUSERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py ``` Here is an example of how to use the same logger as the library in your own module or script: ```python from diffusers.utils import logging logging.set_verbosity_info() logger = logging.get_logger("diffusers") logger.info("INFO") logger.warning("WARN") ``` All methods of the logging module are documented below. The main methods are [`logging.get_verbosity`] to get the current level of verbosity in the logger and [`logging.set_verbosity`] to set the verbosity to the level of your choice. In order from the least verbose to the most verbose: | Method | Integer value | Description | |----------------------------------------------------------:|--------------:|----------------------------------------------------:| | `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` | 50 | only report the most critical errors | | `diffusers.logging.ERROR` | 40 | only report errors | | `diffusers.logging.WARNING` or `diffusers.logging.WARN` | 30 | only report errors and warnings (default) | | `diffusers.logging.INFO` | 20 | only report errors, warnings, and basic information | | `diffusers.logging.DEBUG` | 10 | report all information | By default, `tqdm` progress bars are displayed during model download. [`logging.disable_progress_bar`] and [`logging.enable_progress_bar`] are used to enable or disable this behavior. ## Base setters [[autodoc]] utils.logging.set_verbosity_error [[autodoc]] utils.logging.set_verbosity_warning [[autodoc]] utils.logging.set_verbosity_info [[autodoc]] utils.logging.set_verbosity_debug ## Other functions [[autodoc]] utils.logging.get_verbosity [[autodoc]] utils.logging.set_verbosity [[autodoc]] utils.logging.get_logger [[autodoc]] utils.logging.enable_default_handler [[autodoc]] utils.logging.disable_default_handler [[autodoc]] utils.logging.enable_explicit_format [[autodoc]] utils.logging.reset_format [[autodoc]] utils.logging.enable_progress_bar [[autodoc]] utils.logging.disable_progress_bar
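For instance, a minimal sketch of toggling the download progress bars and checking the current verbosity with the helpers listed above:

```python
from diffusers.utils import logging

logging.disable_progress_bar()  # hide tqdm bars during model download
print(logging.get_verbosity())  # 30 (WARNING) unless changed
logging.enable_progress_bar()   # restore the default behavior
```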
diffusers/docs/source/en/api/logging.md/0
{ "file_path": "diffusers/docs/source/en/api/logging.md", "repo_id": "diffusers", "token_count": 1351 }
95
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# DeepFloyd IF

## Overview

DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
The model is modular, composed of a frozen text encoder and three cascaded pixel diffusion modules:
- Stage 1: a base model that generates a 64x64 px image based on a text prompt,
- Stage 2: a 64x64 px => 256x256 px super-resolution model, and
- Stage 3: a 256x256 px => 1024x1024 px super-resolution model
Stage 1 and Stage 2 utilize a frozen text encoder based on the T5 transformer to extract text embeddings, which are then fed into a UNet architecture enhanced with cross-attention and attention pooling.
Stage 3 is [Stability AI's x4 Upscaling model](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler).
The result is a highly efficient model that outperforms current state-of-the-art models, achieving a zero-shot FID score of 6.66 on the COCO dataset.
Our work underscores the potential of larger UNet architectures in the first stage of cascaded diffusion models and depicts a promising future for text-to-image synthesis.

## Usage

Before you can use IF, you need to accept its usage conditions. To do so:
1. Make sure to have a [Hugging Face account](https://huggingface.co/join) and be logged in.
2. Accept the license on the model card of [DeepFloyd/IF-I-XL-v1.0](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0). Accepting the license on the stage I model card will auto accept for the other IF models.
3. Make sure to log in locally. Install `huggingface_hub`:

```sh
pip install huggingface_hub --upgrade
```

run the login function in a Python shell:

```py
from huggingface_hub import login

login()
```

and enter your [Hugging Face Hub access token](https://huggingface.co/docs/hub/security-tokens#what-are-user-access-tokens).

Next we install `diffusers` and dependencies:

```sh
pip install -q diffusers accelerate transformers
```

The following sections give more in-detail examples of how to use IF.
Specifically:

- [Text-to-Image Generation](#text-to-image-generation)
- [Image-to-Image Generation](#text-guided-image-to-image-generation)
- [Inpainting](#text-guided-inpainting-generation)
- [Reusing model weights](#converting-between-different-pipelines)
- [Speed optimization](#optimizing-for-speed)
- [Memory optimization](#optimizing-for-memory)

**Available checkpoints**

- *Stage-1*
  - [DeepFloyd/IF-I-XL-v1.0](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0)
  - [DeepFloyd/IF-I-L-v1.0](https://huggingface.co/DeepFloyd/IF-I-L-v1.0)
  - [DeepFloyd/IF-I-M-v1.0](https://huggingface.co/DeepFloyd/IF-I-M-v1.0)

- *Stage-2*
  - [DeepFloyd/IF-II-L-v1.0](https://huggingface.co/DeepFloyd/IF-II-L-v1.0)
  - [DeepFloyd/IF-II-M-v1.0](https://huggingface.co/DeepFloyd/IF-II-M-v1.0)

- *Stage-3*
  - [stabilityai/stable-diffusion-x4-upscaler](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler)

**Google Colab**
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/deepfloyd_if_free_tier_google_colab.ipynb)

### Text-to-Image Generation

By default diffusers makes use of [model cpu offloading](../../optimization/memory#model-offloading) to run the whole IF pipeline with as little as 14 GB of VRAM.

```python
from diffusers import DiffusionPipeline
from diffusers.utils import pt_to_pil, make_image_grid
import torch

# stage 1
stage_1 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()

# stage 2
stage_2 = DiffusionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()

# stage 3
safety_modules = {
    "feature_extractor": stage_1.feature_extractor,
    "safety_checker": stage_1.safety_checker,
    "watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()

prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
generator = torch.manual_seed(1)

# text embeds
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)

# stage 1
stage_1_output = stage_1(
    prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, generator=generator, output_type="pt"
).images
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")

# stage 2
stage_2_output = stage_2(
    image=stage_1_output,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    generator=generator,
    output_type="pt",
).images
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")

# stage 3
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, noise_level=100, generator=generator).images
#stage_3_output[0].save("./if_stage_III.png")
make_image_grid([pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=3)
```

### Text Guided Image-to-Image Generation

The same IF model weights can be used for text-guided image-to-image translation or image variation.
In this case just make sure to load the weights using the [`IFImg2ImgPipeline`] and [`IFImg2ImgSuperResolutionPipeline`] pipelines.
**Note**: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines without loading them twice by making use of the [`~DiffusionPipeline.components`] argument as explained [here](#converting-between-different-pipelines).

```python
from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
from diffusers.utils import pt_to_pil, load_image, make_image_grid
import torch

# download image
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
original_image = load_image(url)
original_image = original_image.resize((768, 512))

# stage 1
stage_1 = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()

# stage 2
stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()

# stage 3
safety_modules = {
    "feature_extractor": stage_1.feature_extractor,
    "safety_checker": stage_1.safety_checker,
    "watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()

prompt = "A fantasy landscape in style minecraft"
generator = torch.manual_seed(1)

# text embeds
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)

# stage 1
stage_1_output = stage_1(
    image=original_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    generator=generator,
    output_type="pt",
).images
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")

# stage 2
stage_2_output = stage_2(
    image=stage_1_output,
    original_image=original_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    generator=generator,
    output_type="pt",
).images
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")

# stage 3
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=100).images
#stage_3_output[0].save("./if_stage_III.png")
make_image_grid([original_image, pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=4)
```

### Text Guided Inpainting Generation

The same IF model weights can be used for text-guided inpainting.
In this case just make sure to load the weights using the [`IFInpaintingPipeline`] and [`IFInpaintingSuperResolutionPipeline`] pipelines.

**Note**: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines without loading them twice by making use of the [`~DiffusionPipeline.components()`] function as explained [here](#converting-between-different-pipelines).
```python
from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline
from diffusers.utils import pt_to_pil, load_image, make_image_grid
import torch

# download image
url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"
original_image = load_image(url)

# download mask
url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"
mask_image = load_image(url)

# stage 1
stage_1 = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()

# stage 2
stage_2 = IFInpaintingSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()

# stage 3
safety_modules = {
    "feature_extractor": stage_1.feature_extractor,
    "safety_checker": stage_1.safety_checker,
    "watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()

prompt = "blue sunglasses"
generator = torch.manual_seed(1)

# text embeds
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)

# stage 1
stage_1_output = stage_1(
    image=original_image,
    mask_image=mask_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    generator=generator,
    output_type="pt",
).images
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")

# stage 2
stage_2_output = stage_2(
    image=stage_1_output,
    original_image=original_image,
    mask_image=mask_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    generator=generator,
    output_type="pt",
).images
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")

# stage 3
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=100).images
#stage_3_output[0].save("./if_stage_III.png")
make_image_grid([original_image, mask_image, pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=5)
```

### Converting between different pipelines

In addition to being loaded with `from_pretrained`, Pipelines can also be loaded directly from each other.

```python
from diffusers import IFPipeline, IFSuperResolutionPipeline

pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
pipe_2 = IFSuperResolutionPipeline.from_pretrained("DeepFloyd/IF-II-L-v1.0")


from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline

pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)


from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline

pipe_1 = IFInpaintingPipeline(**pipe_1.components)
pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
```

### Optimizing for speed

The simplest optimization to run IF faster is to move all model components to the GPU.

```py
pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.to("cuda")
```

You can also run the diffusion process for a shorter number of timesteps.
This can either be done with the `num_inference_steps` argument: ```py pipe("<prompt>", num_inference_steps=30) ``` Or with the `timesteps` argument: ```py from diffusers.pipelines.deepfloyd_if import fast27_timesteps pipe("<prompt>", timesteps=fast27_timesteps) ``` When doing image variation or inpainting, you can also decrease the number of timesteps with the strength argument. The strength argument is the amount of noise to add to the input image which also determines how many steps to run in the denoising process. A smaller number will vary the image less but run faster. ```py pipe = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe.to("cuda") image = pipe(image=image, prompt="<prompt>", strength=0.3).images ``` You can also use [`torch.compile`](../../optimization/torch2.0). Note that we have not exhaustively tested `torch.compile` with IF and it might not give expected results. ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe.to("cuda") pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead", fullgraph=True) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) ``` ### Optimizing for memory When optimizing for GPU memory, we can use the standard diffusers CPU offloading APIs. Either the model based CPU offloading, ```py pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() ``` or the more aggressive layer based CPU offloading. ```py pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe.enable_sequential_cpu_offload() ``` Additionally, T5 can be loaded in 8bit precision ```py from transformers import T5EncoderModel text_encoder = T5EncoderModel.from_pretrained( "DeepFloyd/IF-I-XL-v1.0", subfolder="text_encoder", device_map="auto", load_in_8bit=True, variant="8bit" ) from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained( "DeepFloyd/IF-I-XL-v1.0", text_encoder=text_encoder, # pass the previously instantiated 8bit text encoder unet=None, device_map="auto", ) prompt_embeds, negative_embeds = pipe.encode_prompt("<prompt>") ``` For CPU RAM constrained machines like Google Colab free tier where we can't load all model components to the CPU at once, we can manually only load the pipeline with the text encoder or UNet when the respective model components are needed. 
```py
from diffusers import DiffusionPipeline, IFPipeline, IFSuperResolutionPipeline
import torch
import gc
from transformers import T5EncoderModel
from diffusers.utils import pt_to_pil, make_image_grid

text_encoder = T5EncoderModel.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", subfolder="text_encoder", device_map="auto", load_in_8bit=True, variant="8bit"
)

# text to image
pipe = DiffusionPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0",
    text_encoder=text_encoder,  # pass the previously instantiated 8bit text encoder
    unet=None,
    device_map="auto",
)

prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)

# Remove the pipeline so we can re-load the pipeline with the unet
del text_encoder
del pipe
gc.collect()
torch.cuda.empty_cache()

pipe = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16, device_map="auto"
)

generator = torch.Generator().manual_seed(0)
stage_1_output = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    output_type="pt",
    generator=generator,
).images
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png")

# Remove the pipeline so we can load the super-resolution pipeline
del pipe
gc.collect()
torch.cuda.empty_cache()

# First super resolution
pipe = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16, device_map="auto"
)

generator = torch.Generator().manual_seed(0)
stage_2_output = pipe(
    image=stage_1_output,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    output_type="pt",
    generator=generator,
).images
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png")
make_image_grid([pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0]], rows=1, cols=2)
```

## Available Pipelines:

| Pipeline | Tasks | Colab |
|---|---|:---:|
| [pipeline_if.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py) | *Text-to-Image Generation* | - |
| [pipeline_if_superresolution.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py) | *Text-to-Image Generation* | - |
| [pipeline_if_img2img.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py) | *Image-to-Image Generation* | - |
| [pipeline_if_img2img_superresolution.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py) | *Image-to-Image Generation* | - |
| [pipeline_if_inpainting.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py) | *Image-to-Image Generation* | - |
| [pipeline_if_inpainting_superresolution.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py) | *Image-to-Image Generation* | - |

## IFPipeline
[[autodoc]] IFPipeline
	- all
	- __call__

## IFSuperResolutionPipeline
[[autodoc]] IFSuperResolutionPipeline
	- all
	- __call__

## IFImg2ImgPipeline
[[autodoc]] IFImg2ImgPipeline
	- all
	- __call__

## IFImg2ImgSuperResolutionPipeline
[[autodoc]] IFImg2ImgSuperResolutionPipeline
	- all
	- __call__

## IFInpaintingPipeline
[[autodoc]] IFInpaintingPipeline
	- all
	- __call__

## IFInpaintingSuperResolutionPipeline
[[autodoc]] IFInpaintingSuperResolutionPipeline
	- all
	- __call__
diffusers/docs/source/en/api/pipelines/deepfloyd_if.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/deepfloyd_if.md", "repo_id": "diffusers", "token_count": 6743 }
96
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Stable Diffusion 2 Stable Diffusion 2 is a text-to-image _latent diffusion_ model built upon the work of the original [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release), and it was led by Robin Rombach and Katherine Crowson from [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). *The Stable Diffusion 2.0 release includes robust text-to-image models trained using a brand new text encoder (OpenCLIP), developed by LAION with support from Stability AI, which greatly improves the quality of the generated images compared to earlier V1 releases. The text-to-image models in this release can generate images with default resolutions of both 512x512 pixels and 768x768 pixels. These models are trained on an aesthetic subset of the [LAION-5B dataset](https://laion.ai/blog/laion-5b/) created by the DeepFloyd team at Stability AI, which is then further filtered to remove adult content using [LAION’s NSFW filter](https://openreview.net/forum?id=M3Y74vmsMcY).* For more details about how Stable Diffusion 2 works and how it differs from the original Stable Diffusion, please refer to the official [announcement post](https://stability.ai/blog/stable-diffusion-v2-release). The architecture of Stable Diffusion 2 is more or less identical to the original [Stable Diffusion model](./text2img) so check out it's API documentation for how to use Stable Diffusion 2. We recommend using the [`DPMSolverMultistepScheduler`] as it gives a reasonable speed/quality trade-off and can be run with as little as 20 steps. Stable Diffusion 2 is available for tasks like text-to-image, inpainting, super-resolution, and depth-to-image: | Task | Repository | |-------------------------|---------------------------------------------------------------------------------------------------------------| | text-to-image (512x512) | [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) | | text-to-image (768x768) | [stabilityai/stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) | | inpainting | [stabilityai/stable-diffusion-2-inpainting](https://huggingface.co/stabilityai/stable-diffusion-2-inpainting) | | super-resolution | [stable-diffusion-x4-upscaler](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler) | | depth-to-image | [stabilityai/stable-diffusion-2-depth](https://huggingface.co/stabilityai/stable-diffusion-2-depth) | Here are some examples for how to use Stable Diffusion 2 for each task: <Tip> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! 
</Tip>

## Text-to-image

```py
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
import torch

repo_id = "stabilityai/stable-diffusion-2-base"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16")

pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

prompt = "High quality photo of an astronaut riding a horse in space"
image = pipe(prompt, num_inference_steps=25).images[0]
image
```

## Inpainting

```py
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import load_image, make_image_grid

img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = load_image(img_url).resize((512, 512))
mask_image = load_image(mask_url).resize((512, 512))

repo_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16")

pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=25).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```

## Super-resolution

```py
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image, make_image_grid
import torch

# load model and scheduler
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")

# let's download an image
url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
low_res_img = load_image(url)
low_res_img = low_res_img.resize((128, 128))
prompt = "a white cat"
upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0]
make_image_grid([low_res_img.resize((512, 512)), upscaled_image.resize((512, 512))], rows=1, cols=2)
```

## Depth-to-image

```py
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image, make_image_grid

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16,
).to("cuda")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = load_image(url)

prompt = "two tigers"
negative_prompt = "bad, deformed, ugly, bad anatomy"
image = pipe(prompt=prompt, image=init_image, negative_prompt=negative_prompt, strength=0.7).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
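As a final note on the checkpoints in the table above: the 768x768 text-to-image model is used exactly like the base model, you simply generate at its native resolution. A minimal sketch, reusing the imports and `prompt` from the text-to-image example (the step count is only a suggestion):

```py
repo_id = "stabilityai/stable-diffusion-2"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

image = pipe(prompt, height=768, width=768, num_inference_steps=25).images[0]
image
```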
diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_2.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_2.md", "repo_id": "diffusers", "token_count": 2283 }
97
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Effective and efficient diffusion [[open-in-colab]] Getting the [`DiffusionPipeline`] to generate images in a certain style or include what you want can be tricky. Often times, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something out of nothing is a computationally intensive process, especially if you're running inference over and over again. This is why it's important to get the most *computational* (speed) and *memory* (GPU vRAM) efficiency from the pipeline to reduce the time between inference cycles so you can iterate faster. This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`]. Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model: ```python from diffusers import DiffusionPipeline model_id = "runwayml/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True) ``` The example prompt you'll use is a portrait of an old warrior chief, but feel free to use your own prompt: ```python prompt = "portrait photo of a old warrior chief" ``` ## Speed <Tip> 💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)! </Tip> One of the simplest ways to speed up inference is to place the pipeline on a GPU the same way you would with any PyTorch module: ```python pipeline = pipeline.to("cuda") ``` To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility): ```python import torch generator = torch.Generator("cuda").manual_seed(0) ``` Now you can generate an image: ```python image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png"> </div> This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16` or running fewer inference steps. 
Let's start by loading the model in `float16` and generate an image: ```python import torch pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True) pipeline = pipeline.to("cuda") generator = torch.Generator("cuda").manual_seed(0) image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png"> </div> This time, it only took ~11 seconds to generate the image, which is almost 3x faster than before! <Tip> 💡 We strongly suggest always running your pipelines in `float16`, and so far, we've rarely seen any degradation in output quality. </Tip> Another option is to reduce the number of inference steps. Choosing a more efficient scheduler could help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method: ```python pipeline.scheduler.compatibles [ diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler, diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler, diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler, diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, diffusers.schedulers.scheduling_ddpm.DDPMScheduler, diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler, diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler, diffusers.utils.dummy_torch_and_torchsde_objects.DPMSolverSDEScheduler, diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler, diffusers.schedulers.scheduling_pndm.PNDMScheduler, diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler, diffusers.schedulers.scheduling_ddim.DDIMScheduler, ] ``` The Stable Diffusion model uses the [`PNDMScheduler`] by default which usually requires ~50 inference steps, but more performant schedulers like [`DPMSolverMultistepScheduler`], require only ~20 or 25 inference steps. Use the [`~ConfigMixin.from_config`] method to load a new scheduler: ```python from diffusers import DPMSolverMultistepScheduler pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) ``` Now set the `num_inference_steps` to 20: ```python generator = torch.Generator("cuda").manual_seed(0) image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png"> </div> Great, you've managed to cut the inference time to just 4 seconds! ⚡️ ## Memory The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try out different batch sizes until you get an `OutOfMemoryError` (OOM). Create a function that'll generate a batch of images from a list of prompts and `Generators`. Make sure to assign each `Generator` a seed so you can reuse it if it produces a good result. 
```python def get_inputs(batch_size=1): generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)] prompts = batch_size * [prompt] num_inference_steps = 20 return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps} ``` Start with `batch_size=4` and see how much memory you've consumed: ```python from diffusers.utils import make_image_grid images = pipeline(**get_inputs(batch_size=4)).images make_image_grid(images, 2, 2) ``` Unless you have a GPU with more vRAM, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is configure the pipeline to use the [`~DiffusionPipeline.enable_attention_slicing`] function: ```python pipeline.enable_attention_slicing() ``` Now try increasing the `batch_size` to 8! ```python images = pipeline(**get_inputs(batch_size=8)).images make_image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png"> </div> Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images at ~3.5 seconds per image! This is probably the fastest you can go on a T4 GPU without sacrificing quality. ## Quality In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reducing the number of inference steps by using a more performant scheduler, and enabling attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of generated images. ### Better checkpoints The most obvious step is to use better checkpoints. The Stable Diffusion model is a good starting point, and since its official launch, several improved versions have also been released. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself, and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results. As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Try exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in! ### Better pipeline components You can also try replacing the current pipeline components with a newer version. Let's try loading the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline, and generate some images: ```python from diffusers import AutoencoderKL vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda") pipeline.vae = vae images = pipeline(**get_inputs(batch_size=8)).images make_image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png"> </div> ### Better prompt engineering The text prompt you use to generate an image is super important, so much so that it is called *prompt engineering*. 
Some considerations to keep during prompt engineering are: - How is the image or similar images of the one I want to generate stored on the internet? - What additional detail can I give that steers the model towards the style I want? With this in mind, let's improve the prompt to include color and higher quality details: ```python prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes" prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta" ``` Generate a batch of images with the new prompt: ```python images = pipeline(**get_inputs(batch_size=8)).images make_image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png"> </div> Pretty impressive! Let's tweak the second image - corresponding to the `Generator` with a seed of `1` - a bit more by adding some text about the age of the subject: ```python prompts = [ "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", ] generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))] images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images make_image_grid(images, 2, 2) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png"> </div> ## Next steps In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency as well as improving the quality of generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources: - Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5 - 300% faster inference speed. On an A100 GPU, inference can be up to 50% faster! - If you can't use PyTorch 2, we recommend you install [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption. - Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16).
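If you do reach for xFormers, enabling its memory-efficient attention on the `pipeline` object used throughout this guide is a single call (a sketch; it assumes the `xformers` package is installed):

```python
pipeline.enable_xformers_memory_efficient_attention()

# it can be switched off again with:
# pipeline.disable_xformers_memory_efficient_attention()
```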
diffusers/docs/source/en/stable_diffusion.md/0
{ "file_path": "diffusers/docs/source/en/stable_diffusion.md", "repo_id": "diffusers", "token_count": 3962 }
98
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Textual Inversion [Textual Inversion](https://hf.co/papers/2208.01618) is a training technique for personalizing image generation models with just a few example images of what you want it to learn. This technique works by learning and updating the text embeddings (the new embeddings are tied to a special word you must use in the prompt) to match the example images you provide. If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. With the same configuration and setup as PyTorch, the Flax training script should be at least ~70% faster! This guide will explore the [textual_inversion.py](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Navigate to the example folder with the training script and install the required dependencies for the script you're using: <hfoptions id="installation"> <hfoption id="PyTorch"> ```bash cd examples/textual_inversion pip install -r requirements.txt ``` </hfoption> <hfoption id="Flax"> ```bash cd examples/textual_inversion pip install -r requirements_flax.txt ``` </hfoption> </hfoptions> <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```bash from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. 
If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters The training script has many parameters to help you tailor the training run to your needs. All of the parameters and their descriptions are listed in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/839c2a5ece0af4e75530cb520d77bc7ed8acf474/examples/textual_inversion/textual_inversion.py#L176) function. Where applicable, Diffusers provides default values for each parameter such as the training batch size and learning rate, but feel free to change these values in the training command if you'd like. For example, to increase the number of gradient accumulation steps above the default value of 1: ```bash accelerate launch textual_inversion.py \ --gradient_accumulation_steps=4 ``` Some other basic and important parameters to specify include: - `--pretrained_model_name_or_path`: the name of the model on the Hub or a local path to the pretrained model - `--train_data_dir`: path to a folder containing the training dataset (example images) - `--output_dir`: where to save the trained model - `--push_to_hub`: whether to push the trained model to the Hub - `--checkpointing_steps`: frequency of saving a checkpoint as the model trains; this is useful if for some reason training is interrupted, you can continue training from that checkpoint by adding `--resume_from_checkpoint` to your training command - `--num_vectors`: the number of vectors to learn the embeddings with; increasing this parameter helps the model learn better but it comes with increased training costs - `--placeholder_token`: the special word to tie the learned embeddings to (you must use the word in your prompt for inference) - `--initializer_token`: a single-word that roughly describes the object or style you're trying to train on - `--learnable_property`: whether you're training the model to learn a new "style" (for example, Van Gogh's painting style) or "object" (for example, your dog) ## Training script Unlike some of the other training scripts, textual_inversion.py has a custom dataset class, [`TextualInversionDataset`](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L487) for creating a dataset. You can customize the image size, placeholder token, interpolation method, whether to crop the image, and more. If you need to change how the dataset is created, you can modify `TextualInversionDataset`. Next, you'll find the dataset preprocessing code and training loop in the [`main()`](https://github.com/huggingface/diffusers/blob/839c2a5ece0af4e75530cb520d77bc7ed8acf474/examples/textual_inversion/textual_inversion.py#L573) function. 
The script starts by loading the [tokenizer](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L616), [scheduler and model](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L622): ```py # Load tokenizer if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) ``` The special [placeholder token](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L632) is added next to the tokenizer, and the embedding is readjusted to account for the new token. Then, the script [creates a dataset](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L716) from the `TextualInversionDataset`: ```py train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=(" ".join(tokenizer.convert_ids_to_tokens(placeholder_token_ids))), repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers ) ``` Finally, the [training loop](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L784) handles everything else from predicting the noisy residual to updating the embedding weights of the special placeholder token. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 For this guide, you'll download some images of a [cat toy](https://huggingface.co/datasets/diffusers/cat_toy_example) and store them in a directory. But remember, you can create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide). ```py from huggingface_hub import snapshot_download local_dir = "./cat" snapshot_download( "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes" ) ``` Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, and `DATA_DIR` to the path where you just downloaded the cat images to. 
The script creates and saves the following files to your repository: - `learned_embeds.bin`: the learned embedding vectors corresponding to your example images - `token_identifier.txt`: the special placeholder token - `type_of_concept.txt`: the type of concept you're training on (either "object" or "style") <Tip warning={true}> A full training run takes ~1 hour on a single V100 GPU. </Tip> One more thing before you launch the script. If you're interested in following along with the training process, you can periodically save generated images as training progresses. Add the following parameters to the training command: ```bash --validation_prompt="A <cat-toy> train" --num_validation_images=4 --validation_steps=100 ``` <hfoptions id="training-inference"> <hfoption id="PyTorch"> ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATA_DIR="./cat" accelerate launch textual_inversion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" \ --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 \ --scale_lr \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` </hfoption> <hfoption id="Flax"> ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export DATA_DIR="./cat" python textual_inversion_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" \ --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 \ --scale_lr \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` </hfoption> </hfoptions> After training is complete, you can use your newly trained model for inference like: <hfoptions id="training-inference"> <hfoption id="PyTorch"> ```py from diffusers import StableDiffusionPipeline import torch pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") pipeline.load_textual_inversion("sd-concepts-library/cat-toy") image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0] image.save("cat-train.png") ``` </hfoption> <hfoption id="Flax"> Flax doesn't support the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method, but the textual_inversion_flax.py script [saves](https://github.com/huggingface/diffusers/blob/c0f058265161178f2a88849e92b37ffdc81f1dcc/examples/textual_inversion/textual_inversion_flax.py#L636C2-L636C2) the learned embeddings as a part of the model after training. 
This means you can use the model for inference like any other Flax model:

```py
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path-to-your-trained-model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "A <cat-toy> train"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("cat-train.png")
```

</hfoption>
</hfoptions>

## Next steps

Congratulations on training your own Textual Inversion model! 🎉 To learn more about how to use your new model, the following guides may be helpful:

- Learn how to [load Textual Inversion embeddings](../using-diffusers/loading_adapters) and also use them as negative embeddings.
- Learn how to use [Textual Inversion](textual_inversion_inference) for inference with Stable Diffusion 1/2 and Stable Diffusion XL.
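One last practical note on the PyTorch run above: if you didn't pass `--push_to_hub`, the learned embedding can be loaded straight from the local output directory instead of a Hub repo. A sketch, assuming the `output_dir` and placeholder token used earlier:

```py
from diffusers import StableDiffusionPipeline
import torch

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# loads the learned_embeds file saved by the training script
pipeline.load_textual_inversion("./textual_inversion_cat")

image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0]
image.save("cat-train.png")
```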
diffusers/docs/source/en/training/text_inversion.md/0
{ "file_path": "diffusers/docs/source/en/training/text_inversion.md", "repo_id": "diffusers", "token_count": 4383 }
99
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-guided depth-to-image generation [[open-in-colab]] The [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images. In addition, you can also pass a `depth_map` to preserve the image structure. If no `depth_map` is provided, the pipeline automatically predicts the depth via an integrated [depth-estimation model](https://github.com/isl-org/MiDaS). Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]: ```python import torch from diffusers import StableDiffusionDepth2ImgPipeline from diffusers.utils import load_image, make_image_grid pipeline = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16, use_safetensors=True, ).to("cuda") ``` Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to prevent certain words from guiding how an image is generated: ```python url = "http://images.cocodataset.org/val2017/000000039769.jpg" init_image = load_image(url) prompt = "two tigers" negative_prompt = "bad, deformed, ugly, bad anatomy" image = pipeline(prompt=prompt, image=init_image, negative_prompt=negative_prompt, strength=0.7).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` | Input | Output | |---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |
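If you already have a depth estimate, you can pass it to the pipeline yourself via `depth_map` instead of relying on the built-in estimator. The sketch below uses a grayscale version of the input image as a crude stand-in for a real depth map; in practice you would supply an actual estimate (for example from a MiDaS/DPT model), and the pipeline rescales and normalizes the `(1, H, W)` tensor it receives:

```python
import numpy as np
import torch

# crude stand-in for a real depth map, shape (1, H, W)
depth = np.array(init_image.convert("L"), dtype=np.float32) / 255.0
depth_map = torch.from_numpy(depth)[None, ...].to("cuda", dtype=torch.float16)

image = pipeline(
    prompt=prompt,
    image=init_image,
    depth_map=depth_map,
    negative_prompt=negative_prompt,
    strength=0.7,
).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```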
diffusers/docs/source/en/using-diffusers/depth2img.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/depth2img.md", "repo_id": "diffusers", "token_count": 878 }
100
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Using Diffusers with other modalities Diffusers is in the process of expanding to modalities other than images. Example type | Colab | Pipeline | :-------------------------:|:-------------------------:|:-------------------------:| [Molecule conformation](https://www.nature.com/subjects/molecular-conformation#:~:text=Definition,to%20changes%20in%20their%20environment.) generation | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/geodiff_molecule_conformation.ipynb) | ❌ More coming soon!
diffusers/docs/source/en/using-diffusers/other-modalities.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/other-modalities.md", "repo_id": "diffusers", "token_count": 333 }
101
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Understanding pipelines, models and schedulers [[open-in-colab]] 🧨 Diffusers is designed to be a user-friendly and flexible toolbox for building diffusion systems tailored to your use-case. At the core of the toolbox are models and schedulers. While the [`DiffusionPipeline`] bundles these components together for convenience, you can also unbundle the pipeline and use the models and schedulers separately to create new diffusion systems. In this tutorial, you'll learn how to use models and schedulers to assemble a diffusion system for inference, starting with a basic pipeline and then progressing to the Stable Diffusion pipeline. ## Deconstruct a basic pipeline A pipeline is a quick and easy way to run a model for inference, requiring no more than four lines of code to generate an image: ```py >>> from diffusers import DDPMPipeline >>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda") >>> image = ddpm(num_inference_steps=25).images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/> </div> That was super easy, but how did the pipeline do that? Let's breakdown the pipeline and take a look at what's happening under the hood. In the example above, the pipeline contains a [`UNet2DModel`] model and a [`DDPMScheduler`]. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps. To recreate the pipeline with the model and scheduler separately, let's write our own denoising process. 1. Load the model and scheduler: ```py >>> from diffusers import DDPMScheduler, UNet2DModel >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") >>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda") ``` 2. Set the number of timesteps to run the denoising process for: ```py >>> scheduler.set_timesteps(50) ``` 3. Setting the scheduler timesteps creates a tensor with evenly spaced elements in it, 50 in this example. Each element corresponds to a timestep at which the model denoises an image. When you create the denoising loop later, you'll iterate over this tensor to denoise an image: ```py >>> scheduler.timesteps tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720, 700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440, 420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160, 140, 120, 100, 80, 60, 40, 20, 0]) ``` 4. 
Create some random noise with the same shape as the desired output: ```py >>> import torch >>> sample_size = model.config.sample_size >>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") ``` 5. Now write a loop to iterate over the timesteps. At each timestep, the model does a [`UNet2DModel.forward`] pass and returns the noisy residual. The scheduler's [`~DDPMScheduler.step`] method takes the noisy residual, timestep, and input and it predicts the image at the previous timestep. This output becomes the next input to the model in the denoising loop, and it'll repeat until it reaches the end of the `timesteps` array. ```py >>> input = noise >>> for t in scheduler.timesteps: ... with torch.no_grad(): ... noisy_residual = model(input, t).sample ... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample ... input = previous_noisy_sample ``` This is the entire denoising process, and you can use this same pattern to write any diffusion system. 6. The last step is to convert the denoised output into an image: ```py >>> from PIL import Image >>> import numpy as np >>> image = (input / 2 + 0.5).clamp(0, 1).squeeze() >>> image = (image.permute(1, 2, 0) * 255).round().to(torch.uint8).cpu().numpy() >>> image = Image.fromarray(image) >>> image ``` In the next section, you'll put your skills to the test and break down the more complex Stable Diffusion pipeline. The steps are more or less the same. You'll initialize the necessary components, and set the number of timesteps to create a `timestep` array. The `timestep` array is used in the denoising loop, and for each element in this array, the model predicts a less noisy image. The denoising loop iterates over the `timestep` array, and at each timestep, it outputs a noisy residual and the scheduler uses it to predict a less noisy image at the previous timestep. This process is repeated until you reach the end of the `timestep` array. Let's try it out! ## Deconstruct the Stable Diffusion pipeline Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. An encoder compresses the image into a smaller representation, and a decoder converts the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler. As you can see, this is already more complex than the DDPM pipeline which only contains a UNet model. The Stable Diffusion model has three separate pretrained models. <Tip> 💡 Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog for more details about how the VAE, UNet, and text encoder models work. </Tip> Now that you know what you need for the Stable Diffusion pipeline, load all these components with the [`~ModelMixin.from_pretrained`] method. 
You can find them in the pretrained [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint, and each component is stored in a separate subfolder: ```py >>> from PIL import Image >>> import torch >>> from transformers import CLIPTextModel, CLIPTokenizer >>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler >>> vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", use_safetensors=True) >>> tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer") >>> text_encoder = CLIPTextModel.from_pretrained( ... "CompVis/stable-diffusion-v1-4", subfolder="text_encoder", use_safetensors=True ... ) >>> unet = UNet2DConditionModel.from_pretrained( ... "CompVis/stable-diffusion-v1-4", subfolder="unet", use_safetensors=True ... ) ``` Instead of the default [`PNDMScheduler`], exchange it for the [`UniPCMultistepScheduler`] to see how easy it is to plug a different scheduler in: ```py >>> from diffusers import UniPCMultistepScheduler >>> scheduler = UniPCMultistepScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") ``` To speed up inference, move the models to a GPU since, unlike the scheduler, they have trainable weights: ```py >>> torch_device = "cuda" >>> vae.to(torch_device) >>> text_encoder.to(torch_device) >>> unet.to(torch_device) ``` ### Create text embeddings The next step is to tokenize the text to generate embeddings. The text is used to condition the UNet model and steer the diffusion process towards something that resembles the input prompt. <Tip> 💡 The `guidance_scale` parameter determines how much weight should be given to the prompt when generating an image. </Tip> Feel free to choose any prompt you like if you want to generate something else! ```py >>> prompt = ["a photograph of an astronaut riding a horse"] >>> height = 512 # default height of Stable Diffusion >>> width = 512 # default width of Stable Diffusion >>> num_inference_steps = 25 # Number of denoising steps >>> guidance_scale = 7.5 # Scale for classifier-free guidance >>> generator = torch.manual_seed(0) # Seed generator to create the initial latent noise >>> batch_size = len(prompt) ``` Tokenize the text and generate the embeddings from the prompt: ```py >>> text_input = tokenizer( ... prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt" ... ) >>> with torch.no_grad(): ... text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] ``` You'll also need to generate the *unconditional text embeddings* which are the embeddings for the padding token. These need to have the same shape (`batch_size` and `seq_length`) as the conditional `text_embeddings`: ```py >>> max_length = text_input.input_ids.shape[-1] >>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt") >>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] ``` Let's concatenate the conditional and unconditional embeddings into a batch to avoid doing two forward passes: ```py >>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) ``` ### Create random noise Next, generate some initial random noise as a starting point for the diffusion process. This is the latent representation of the image, and it'll be gradually denoised. 
At this point, the `latent` image is smaller than the final image size but that's okay though because the model will transform it into the final 512x512 image dimensions later. <Tip> 💡 The height and width are divided by 8 because the `vae` model has 3 down-sampling layers. You can check by running the following: ```py 2 ** (len(vae.config.block_out_channels) - 1) == 8 ``` </Tip> ```py >>> latents = torch.randn( ... (batch_size, unet.config.in_channels, height // 8, width // 8), ... generator=generator, ... device=torch_device, ... ) ``` ### Denoise the image Start by scaling the input with the initial noise distribution, *sigma*, the noise scale value, which is required for improved schedulers like [`UniPCMultistepScheduler`]: ```py >>> latents = latents * scheduler.init_noise_sigma ``` The last step is to create the denoising loop that'll progressively transform the pure noise in `latents` to an image described by your prompt. Remember, the denoising loop needs to do three things: 1. Set the scheduler's timesteps to use during denoising. 2. Iterate over the timesteps. 3. At each timestep, call the UNet model to predict the noise residual and pass it to the scheduler to compute the previous noisy sample. ```py >>> from tqdm.auto import tqdm >>> scheduler.set_timesteps(num_inference_steps) >>> for t in tqdm(scheduler.timesteps): ... # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. ... latent_model_input = torch.cat([latents] * 2) ... latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t) ... # predict the noise residual ... with torch.no_grad(): ... noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample ... # perform guidance ... noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) ... noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) ... # compute the previous noisy sample x_t -> x_t-1 ... latents = scheduler.step(noise_pred, t, latents).prev_sample ``` ### Decode the image The final step is to use the `vae` to decode the latent representation into an image and get the decoded output with `sample`: ```py # scale and decode the image latents with vae latents = 1 / 0.18215 * latents with torch.no_grad(): image = vae.decode(latents).sample ``` Lastly, convert the image to a `PIL.Image` to see your generated image! ```py >>> image = (image / 2 + 0.5).clamp(0, 1).squeeze() >>> image = (image.permute(1, 2, 0) * 255).to(torch.uint8).cpu().numpy() >>> image = Image.fromarray(image) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/> </div> ## Next steps From basic to complex pipelines, you've seen that all you really need to write your own diffusion system is a denoising loop. The loop should set the scheduler's timesteps, iterate over them, and alternate between calling the UNet model to predict the noise residual and passing it to the scheduler to compute the previous noisy sample. This is really what 🧨 Diffusers is designed for: to make it intuitive and easy to write your own diffusion system using models and schedulers. For your next steps, feel free to: * Learn how to [build and contribute a pipeline](../using-diffusers/contribute_pipeline) to 🧨 Diffusers. We can't wait and see what you'll come up with! 
* Explore [existing pipelines](../api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
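As a concrete starting point for that last suggestion, here is a minimal sketch that reuses the `google/ddpm-cat-256` checkpoint and the exact calls shown in the first section of this guide, wrapping the denoising loop into a small reusable function. It is only a sketch, not a full pipeline class:

```py
import torch
from diffusers import DDPMScheduler, UNet2DModel


def generate(model, scheduler, num_inference_steps=50, device="cuda"):
    # set the timesteps, start from random noise, and iteratively denoise
    scheduler.set_timesteps(num_inference_steps)
    sample_size = model.config.sample_size
    sample = torch.randn((1, model.config.in_channels, sample_size, sample_size), device=device)
    for t in scheduler.timesteps:
        with torch.no_grad():
            noise_pred = model(sample, t).sample
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample


model = UNet2DModel.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda")
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
sample = generate(model, scheduler)
```

The returned `sample` can be converted to a `PIL.Image` with the same post-processing shown earlier in this guide.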
diffusers/docs/source/en/using-diffusers/write_own_pipeline.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/write_own_pipeline.md", "repo_id": "diffusers", "token_count": 4145 }
102
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Apple Silicon (M1/M2)에서 Stable Diffusion을 사용하는 방법 Diffusers는 Stable Diffusion 추론을 위해 PyTorch `mps`를 사용해 Apple 실리콘과 호환됩니다. 다음은 Stable Diffusion이 있는 M1 또는 M2 컴퓨터를 사용하기 위해 따라야 하는 단계입니다. ## 요구 사항 - Apple silicon (M1/M2) 하드웨어의 Mac 컴퓨터. - macOS 12.6 또는 이후 (13.0 또는 이후 추천). - Python arm64 버전 - PyTorch 2.0(추천) 또는 1.13(`mps`를 지원하는 최소 버전). https://pytorch.org/get-started/locally/의 지침에 따라 `pip` 또는 `conda`로 설치할 수 있습니다. ## 추론 파이프라인 아래 코드는 익숙한 `to()` 인터페이스를 사용하여 `mps` 백엔드로 Stable Diffusion 파이프라인을 M1 또는 M2 장치로 이동하는 방법을 보여줍니다. <Tip warning={true}> **PyTorch 1.13을 사용 중일 때** 추가 일회성 전달을 사용하여 파이프라인을 "프라이밍"하는 것을 추천합니다. 이것은 발견한 이상한 문제에 대한 임시 해결 방법입니다. 첫 번째 추론 전달은 후속 전달과 약간 다른 결과를 생성합니다. 이 전달은 한 번만 수행하면 되며 추론 단계를 한 번만 사용하고 결과를 폐기해도 됩니다. </Tip> 이전 팁에서 설명한 것들을 포함한 여러 문제를 해결하므로 PyTorch 2 이상을 사용하는 것이 좋습니다. ```python # `huggingface-cli login`으로 로그인되어 있는지 확인 from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to("mps") # 컴퓨터의 RAM이 64GB 이하일 때 추천 pipe.enable_attention_slicing() prompt = "a photo of an astronaut riding a horse on mars" # 처음 "워밍업" 전달 (위 설명을 보세요) _ = pipe(prompt, num_inference_steps=1) # 결과는 워밍업 전달 후의 CPU 장치의 결과와 일치합니다. image = pipe(prompt).images[0] ``` ## 성능 추천 M1/M2 성능은 메모리 압력에 매우 민감합니다. 시스템은 필요한 경우 자동으로 스왑되지만 스왑할 때 성능이 크게 저하됩니다. 특히 컴퓨터의 시스템 RAM이 64GB 미만이거나 512 × 512픽셀보다 큰 비표준 해상도에서 이미지를 생성하는 경우, 추론 중에 메모리 압력을 줄이고 스와핑을 방지하기 위해 *어텐션 슬라이싱*을 사용하는 것이 좋습니다. 어텐션 슬라이싱은 비용이 많이 드는 어텐션 작업을 한 번에 모두 수행하는 대신 여러 단계로 수행합니다. 일반적으로 범용 메모리가 없는 컴퓨터에서 ~20%의 성능 영향을 미치지만 64GB 이상이 아닌 경우 대부분의 Apple Silicon 컴퓨터에서 *더 나은 성능*이 관찰되었습니다. ```python pipeline.enable_attention_slicing() ``` ## Known Issues - 여러 프롬프트를 배치로 생성하는 것은 [충돌이 발생하거나 안정적으로 작동하지 않습니다](https://github.com/huggingface/diffusers/issues/363). 우리는 이것이 [PyTorch의 `mps` 백엔드](https://github.com/pytorch/pytorch/issues/84039)와 관련이 있다고 생각합니다. 이 문제는 해결되고 있지만 지금은 배치 대신 반복 방법을 사용하는 것이 좋습니다.
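예를 들어 여러 개의 프롬프트가 필요하다면, 아래와 같이 배치 대신 반복문으로 하나씩 처리할 수 있습니다. (위에서 만든 `pipe`를 그대로 사용하고, 프롬프트는 임의의 예시라고 가정한 간단한 스케치입니다.)

```python
prompts = [
    "a photo of an astronaut riding a horse on mars",
    "a photo of an astronaut riding a horse on the moon",
]

images = []
for prompt in prompts:
    # 프롬프트를 배치로 묶지 않고 하나씩 처리합니다
    images.append(pipe(prompt).images[0])
```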
diffusers/docs/source/ko/optimization/mps.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/mps.md", "repo_id": "diffusers", "token_count": 2532 }
103
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Low-Rank Adaptation of Large Language Models (LoRA) [[open-in-colab]] <Tip warning={true}> 현재 LoRA는 [`UNet2DConditionalModel`]의 어텐션 레이어에서만 지원됩니다. </Tip> [LoRA(Low-Rank Adaptation of Large Language Models)](https://arxiv.org/abs/2106.09685)는 메모리를 적게 사용하면서 대규모 모델의 학습을 가속화하는 학습 방법입니다. 이는 rank-decomposition weight 행렬 쌍(**업데이트 행렬**이라고 함)을 추가하고 새로 추가된 가중치**만** 학습합니다. 여기에는 몇 가지 장점이 있습니다. - 이전에 미리 학습된 가중치는 고정된 상태로 유지되므로 모델이 [치명적인 망각](https://www.pnas.org/doi/10.1073/pnas.1611835114) 경향이 없습니다. - Rank-decomposition 행렬은 원래 모델보다 파라메터 수가 훨씬 적으므로 학습된 LoRA 가중치를 쉽게 끼워넣을 수 있습니다. - LoRA 매트릭스는 일반적으로 원본 모델의 어텐션 레이어에 추가됩니다. 🧨 Diffusers는 [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] 메서드를 제공하여 LoRA 가중치를 모델의 어텐션 레이어로 불러옵니다. `scale` 매개변수를 통해 모델이 새로운 학습 이미지에 맞게 조정되는 범위를 제어할 수 있습니다. - 메모리 효율성이 향상되어 Tesla T4, RTX 3080 또는 RTX 2080 Ti와 같은 소비자용 GPU에서 파인튜닝을 실행할 수 있습니다! T4와 같은 GPU는 무료이며 Kaggle 또는 Google Colab 노트북에서 쉽게 액세스할 수 있습니다. <Tip> 💡 LoRA는 어텐션 레이어에만 한정되지는 않습니다. 저자는 언어 모델의 어텐션 레이어를 수정하는 것이 매우 효율적으로 죻은 성능을 얻기에 충분하다는 것을 발견했습니다. 이것이 LoRA 가중치를 모델의 어텐션 레이어에 추가하는 것이 일반적인 이유입니다. LoRA 작동 방식에 대한 자세한 내용은 [Using LoRA for effective Stable Diffusion fine-tuning](https://huggingface.co/blog/lora) 블로그를 확인하세요! </Tip> [cloneofsimo](https://github.com/cloneofsimo)는 인기 있는 [lora](https://github.com/cloneofsimo/lora) GitHub 리포지토리에서 Stable Diffusion을 위한 LoRA 학습을 최초로 시도했습니다. 🧨 Diffusers는 [text-to-image 생성](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image#training-with-lora) 및 [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#training-with-low-rank-adaptation-of-large-language-models-lora)을 지원합니다. 이 가이드는 두 가지를 모두 수행하는 방법을 보여줍니다. 모델을 저장하거나 커뮤니티와 공유하려면 Hugging Face 계정에 로그인하세요(아직 계정이 없는 경우 [생성](hf.co/join)하세요): ```bash huggingface-cli login ``` ## Text-to-image 수십억 개의 파라메터들이 있는 Stable Diffusion과 같은 모델을 파인튜닝하는 것은 느리고 어려울 수 있습니다. LoRA를 사용하면 diffusion 모델을 파인튜닝하는 것이 훨씬 쉽고 빠릅니다. 8비트 옵티마이저와 같은 트릭에 의존하지 않고도 11GB의 GPU RAM으로 하드웨어에서 실행할 수 있습니다. ### 학습[[dreambooth-training]] [Pokémon BLIP 캡션](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 데이터셋으로 [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)를 파인튜닝해 나만의 포켓몬을 생성해 보겠습니다. 시작하려면 `MODEL_NAME` 및 `DATASET_NAME` 환경 변수가 설정되어 있는지 확인하십시오. `OUTPUT_DIR` 및 `HUB_MODEL_ID` 변수는 선택 사항이며 허브에서 모델을 저장할 위치를 지정합니다. ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="/sddata/finetune/lora/pokemon" export HUB_MODEL_ID="pokemon-lora" export DATASET_NAME="lambdalabs/pokemon-blip-captions" ``` 학습을 시작하기 전에 알아야 할 몇 가지 플래그가 있습니다. * `--push_to_hub`를 명시하면 학습된 LoRA 임베딩을 허브에 저장합니다. * `--report_to=wandb`는 학습 결과를 가중치 및 편향 대시보드에 보고하고 기록합니다(예를 들어, 이 [보고서](https://wandb.ai/pcuenq/text2image-fine-tune/run/b4k1w0tn?workspace=user-pcuenq)를 참조하세요). * `--learning_rate=1e-04`, 일반적으로 LoRA에서 사용하는 것보다 더 높은 학습률을 사용할 수 있습니다. 
이제 학습을 시작할 준비가 되었습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)에서 찾을 수 있습니다). ```bash accelerate launch train_dreambooth_lora.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --checkpointing_steps=100 \ --learning_rate=1e-4 \ --report_to="wandb" \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --validation_prompt="A photo of sks dog in a bucket" \ --validation_epochs=50 \ --seed="0" \ --push_to_hub ``` ### 추론[[dreambooth-inference]] 이제 [`StableDiffusionPipeline`]에서 기본 모델을 불러와 추론을 위해 모델을 사용할 수 있습니다: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline >>> model_base = "runwayml/stable-diffusion-v1-5" >>> pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16) ``` *기본 모델의 가중치 위에* 파인튜닝된 DreamBooth 모델에서 LoRA 가중치를 불러온 다음, 더 빠른 추론을 위해 파이프라인을 GPU로 이동합니다. LoRA 가중치를 프리징된 사전 훈련된 모델 가중치와 병합할 때, 선택적으로 'scale' 매개변수로 어느 정도의 가중치를 병합할 지 조절할 수 있습니다: <Tip> 💡 `0`의 `scale` 값은 LoRA 가중치를 사용하지 않아 원래 모델의 가중치만 사용한 것과 같고, `1`의 `scale` 값은 파인튜닝된 LoRA 가중치만 사용함을 의미합니다. 0과 1 사이의 값들은 두 결과들 사이로 보간됩니다. </Tip> ```py >>> pipe.unet.load_attn_procs(model_path) >>> pipe.to("cuda") # LoRA 파인튜닝된 모델의 가중치 절반과 기본 모델의 가중치 절반 사용 >>> image = pipe( ... "A picture of a sks dog in a bucket.", ... num_inference_steps=25, ... guidance_scale=7.5, ... cross_attention_kwargs={"scale": 0.5}, ... ).images[0] # 완전히 파인튜닝된 LoRA 모델의 가중치 사용 >>> image = pipe("A picture of a sks dog in a bucket.", num_inference_steps=25, guidance_scale=7.5).images[0] >>> image.save("bucket-dog.png") ```
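여러 `scale` 값의 효과를 비교해보고 싶다면, 아래와 같이 간단한 반복문을 사용할 수 있습니다. (위에서 불러온 `pipe`를 그대로 사용한다고 가정한 예시 스케치입니다.)

```py
for scale in [0.0, 0.5, 1.0]:
    # scale 값에 따라 LoRA 가중치가 얼마나 반영되는지 비교합니다
    image = pipe(
        "A picture of a sks dog in a bucket.",
        num_inference_steps=25,
        guidance_scale=7.5,
        cross_attention_kwargs={"scale": scale},
    ).images[0]
    image.save(f"bucket-dog-scale-{scale}.png")
```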
diffusers/docs/source/ko/training/lora.md/0
{ "file_path": "diffusers/docs/source/ko/training/lora.md", "repo_id": "diffusers", "token_count": 4733 }
104
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 파이프라인, 모델, 스케줄러 불러오기 기본적으로 diffusion 모델은 다양한 컴포넌트들(모델, 토크나이저, 스케줄러) 간의 복잡한 상호작용을 기반으로 동작합니다. 디퓨저스(Diffusers)는 이러한 diffusion 모델을 보다 쉽고 간편한 API로 제공하는 것을 목표로 설계되었습니다. [`DiffusionPipeline`]은 diffusion 모델이 갖는 복잡성을 하나의 파이프라인 API로 통합하고, 동시에 이를 구성하는 각각의 컴포넌트들을 태스크에 맞춰 유연하게 커스터마이징할 수 있도록 지원하고 있습니다. diffusion 모델의 훈련과 추론에 필요한 모든 것은 [`DiffusionPipeline.from_pretrained`] 메서드를 통해 접근할 수 있습니다. (이 말의 의미는 다음 단락에서 보다 자세하게 다뤄보도록 하겠습니다.) 이 문서에서는 설명할 내용은 다음과 같습니다. * 허브를 통해 혹은 로컬로 파이프라인을 불러오는 법 * 파이프라인에 다른 컴포넌트들을 적용하는 법 * 오리지널 체크포인트가 아닌 variant를 불러오는 법 (variant란 기본으로 설정된 `fp32`가 아닌 다른 부동 소수점 타입(예: `fp16`)을 사용하거나 Non-EMA 가중치를 사용하는 체크포인트들을 의미합니다.) * 모델과 스케줄러를 불러오는 법 ## Diffusion 파이프라인 <Tip> 💡 [`DiffusionPipeline`] 클래스가 동작하는 방식에 보다 자세한 내용이 궁금하다면, [DiffusionPipeline explained](#diffusionpipeline에-대해-알아보기) 섹션을 확인해보세요. </Tip> [`DiffusionPipeline`] 클래스는 diffusion 모델을 [허브](https://huggingface.co/models?library=diffusers)로부터 불러오는 가장 심플하면서 보편적인 방식입니다. [`DiffusionPipeline.from_pretrained`] 메서드는 적합한 파이프라인 클래스를 자동으로 탐지하고, 필요한 구성요소(configuration)와 가중치(weight) 파일들을 다운로드하고 캐싱한 다음, 해당 파이프라인 인스턴스를 반환합니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = DiffusionPipeline.from_pretrained(repo_id) ``` 물론 [`DiffusionPipeline`] 클래스를 사용하지 않고, 명시적으로 직접 해당 파이프라인 클래스를 불러오는 것도 가능합니다. 아래 예시 코드는 위 예시와 동일한 인스턴스를 반환합니다. ```python from diffusers import StableDiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(repo_id) ``` [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)이나 [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) 같은 체크포인트들의 경우, 하나 이상의 다양한 태스크에 활용될 수 있습니다. (예를 들어 위의 두 체크포인트의 경우, text-to-image와 image-to-image에 모두 활용될 수 있습니다.) 만약 이러한 체크포인트들을 기본 설정 태스크가 아닌 다른 태스크에 활용하고자 한다면, 해당 태스크에 대응되는 파이프라인(task-specific pipeline)을 사용해야 합니다. ```python from diffusers import StableDiffusionImg2ImgPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id) ``` ### 로컬 파이프라인 파이프라인을 로컬로 불러오고자 한다면, `git-lfs`를 사용하여 직접 체크포인트를 로컬 디스크에 다운로드 받아야 합니다. 아래의 명령어를 실행하면 `./stable-diffusion-v1-5`란 이름으로 폴더가 로컬디스크에 생성됩니다. ```bash git lfs install git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` 그런 다음 해당 로컬 경로를 [`~DiffusionPipeline.from_pretrained`] 메서드에 전달합니다. ```python from diffusers import DiffusionPipeline repo_id = "./stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id) ``` 위의 예시코드처럼 만약 `repo_id`가 로컬 패스(local path)라면, [`~DiffusionPipeline.from_pretrained`] 메서드는 이를 자동으로 감지하여 허브에서 파일을 다운로드하지 않습니다. 만약 로컬 디스크에 저장된 파이프라인 체크포인트가 최신 버전이 아닐 경우에도, 최신 버전을 다운로드하지 않고 기존 로컬 디스크에 저장된 체크포인트를 사용한다는 것을 의미합니다. ### 파이프라인 내부의 컴포넌트 교체하기 파이프라인 내부의 컴포넌트들은 호환 가능한 다른 컴포넌트로 교체될 수 있습니다. 이와 같은 컴포넌트 교체가 중요한 이유는 다음과 같습니다. - 어떤 스케줄러를 사용할 것인가는 생성속도와 생성품질 간의 트레이드오프를 정의하는 중요한 요소입니다. 
- diffusion 모델 내부의 컴포넌트들은 일반적으로 각각 독립적으로 훈련되기 때문에, 더 좋은 성능을 보여주는 컴포넌트가 있다면 그걸로 교체하는 식으로 성능을 향상시킬 수 있습니다. - 파인 튜닝 단계에서는 일반적으로 UNet 혹은 텍스트 인코더와 같은 일부 컴포넌트들만 훈련하게 됩니다. 어떤 스케줄러들이 호환가능한지는 `compatibles` 속성을 통해 확인할 수 있습니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id) stable_diffusion.scheduler.compatibles ``` 이번에는 [`SchedulerMixin.from_pretrained`] 메서드를 사용해서, 기존 기본 스케줄러였던 [`PNDMScheduler`]를 보다 우수한 성능의 [`EulerDiscreteScheduler`]로 바꿔봅시다. 스케줄러를 로드할 때는 `subfolder` 인자를 통해, 해당 파이프라인의 리포지토리에서 [스케줄러에 관한 하위폴더](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/scheduler)를 명시해주어야 합니다. 그 다음 새롭게 생성한 [`EulerDiscreteScheduler`] 인스턴스를 [`DiffusionPipeline`]의 `scheduler` 인자에 전달합니다. ```python from diffusers import DiffusionPipeline, EulerDiscreteScheduler, DPMSolverMultistepScheduler repo_id = "runwayml/stable-diffusion-v1-5" scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler) ``` ### 세이프티 체커 스테이블 diffusion과 같은 diffusion 모델들은 유해한 이미지를 생성할 수도 있습니다. 이를 예방하기 위해 디퓨저스는 생성된 이미지의 유해성을 판단하는 [세이프티 체커(safety checker)](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) 기능을 지원하고 있습니다. 만약 세이프티 체커의 사용을 원하지 않는다면, `safety_checker` 인자에 `None`을 전달해주시면 됩니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None) ``` ### 컴포넌트 재사용 복수의 파이프라인에 동일한 모델이 반복적으로 사용한다면, 굳이 해당 모델의 동일한 가중치를 중복으로 RAM에 불러올 필요는 없을 것입니다. [`~DiffusionPipeline.components`] 속성을 통해 파이프라인 내부의 컴포넌트들을 참조할 수 있는데, 이번 단락에서는 이를 통해 동일한 모델 가중치를 RAM에 중복으로 불러오는 것을 방지하는 법에 대해 알아보겠습니다. ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "runwayml/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id) components = stable_diffusion_txt2img.components ``` 그 다음 위 예시 코드에서 선언한 `components` 변수를 다른 파이프라인에 전달함으로써, 모델의 가중치를 중복으로 RAM에 로딩하지 않고, 동일한 컴포넌트를 재사용할 수 있습니다. ```python stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components) ``` 물론 각각의 컴포넌트들을 따로 따로 파이프라인에 전달할 수도 있습니다. 예를 들어 `stable_diffusion_txt2img` 파이프라인 안의 컴포넌트들 가운데서 세이프티 체커(`safety_checker`)와 피쳐 익스트랙터(`feature_extractor`)를 제외한 컴포넌트들만 `stable_diffusion_img2img` 파이프라인에서 재사용하는 방식 역시 가능합니다. ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "runwayml/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id) stable_diffusion_img2img = StableDiffusionImg2ImgPipeline( vae=stable_diffusion_txt2img.vae, text_encoder=stable_diffusion_txt2img.text_encoder, tokenizer=stable_diffusion_txt2img.tokenizer, unet=stable_diffusion_txt2img.unet, scheduler=stable_diffusion_txt2img.scheduler, safety_checker=None, feature_extractor=None, requires_safety_checker=False, ) ``` ## Checkpoint variants Variant란 일반적으로 다음과 같은 체크포인트들을 의미합니다. - `torch.float16`과 같이 정밀도는 더 낮지만, 용량 역시 더 작은 부동소수점 타입의 가중치를 사용하는 체크포인트. *(다만 이와 같은 variant의 경우, 추가적인 훈련과 CPU환경에서의 구동이 불가능합니다.)* - Non-EMA 가중치를 사용하는 체크포인트. *(Non-EMA 가중치의 경우, 파인 튜닝 단계에서 사용하는 것이 권장되는데, 추론 단계에선 사용하지 않는 것이 권장됩니다.)* <Tip> 💡 모델 구조는 동일하지만 서로 다른 학습 환경에서 서로 다른 데이터셋으로 학습된 체크포인트들이 있을 경우, 해당 체크포인트들은 variant 단계가 아닌 리포지토리 단계에서 분리되어 관리되어야 합니다. (즉, 해당 체크포인트들은 서로 다른 리포지토리에서 따로 관리되어야 합니다. 
예시: [`stable-diffusion-v1-4`], [`stable-diffusion-v1-5`]). </Tip> | **checkpoint type** | **weight name** | **argument for loading weights** | | ------------------- | ----------------------------------- | -------------------------------- | | original | diffusion_pytorch_model.bin | | | floating point | diffusion_pytorch_model.fp16.bin | `variant`, `torch_dtype` | | non-EMA | diffusion_pytorch_model.non_ema.bin | `variant` | variant를 로드할 때 2개의 중요한 argument가 있습니다. * `torch_dtype`은 불러올 체크포인트의 부동소수점을 정의합니다. 예를 들어 `torch_dtype=torch.float16`을 명시함으로써 가중치의 부동소수점 타입을 `fl16`으로 변환할 수 있습니다. (만약 따로 설정하지 않을 경우, 기본값으로 `fp32` 타입의 가중치가 로딩됩니다.) 또한 `variant` 인자를 명시하지 않은 채로 체크포인트를 불러온 다음, 해당 체크포인트를 `torch_dtype=torch.float16` 인자를 통해 `fp16` 타입으로 변환하는 것 역시 가능합니다. 이 경우 기본으로 설정된 `fp32` 가중치가 먼저 다운로드되고, 해당 가중치들을 불러온 다음 `fp16` 타입으로 변환하게 됩니다. * `variant` 인자는 리포지토리에서 어떤 variant를 불러올 것인가를 정의합니다. 가령 [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) 리포지토리로부터 `non_ema` 체크포인트를 불러오고자 한다면, `variant="non_ema"` 인자를 전달해야 합니다. ```python from diffusers import DiffusionPipeline # load fp16 variant stable_diffusion = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) # load non_ema variant stable_diffusion = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema") ``` 다른 부동소수점 타입의 가중치 혹은 non-EMA 가중치를 사용하는 체크포인트를 저장하기 위해서는, [`DiffusionPipeline.save_pretrained`] 메서드를 사용해야 하며, 이 때 `variant` 인자를 명시해줘야 합니다. 원래의 체크포인트와 동일한 폴더에 variant를 저장해야 하며, 이렇게 하면 동일한 폴더에서 오리지널 체크포인트과 variant를 모두 불러올 수 있습니다. ```python from diffusers import DiffusionPipeline # save as fp16 variant stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16") # save as non-ema variant stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema") ``` 만약 variant를 기존 폴더에 저장하지 않을 경우, `variant` 인자를 반드시 명시해야 합니다. 그렇게 하지 않을 경우 원래의 오리지널 체크포인트를 찾을 수 없게 되기 때문에 에러가 발생합니다. ```python # 👎 this won't work stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", torch_dtype=torch.float16) # 👍 this works stable_diffusion = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) ``` ### 모델 불러오기 모델들은 [`ModelMixin.from_pretrained`] 메서드를 통해 불러올 수 있습니다. 해당 메서드는 최신 버전의 모델 가중치 파일과 설정 파일(configurations)을 다운로드하고 캐싱합니다. 만약 이러한 파일들이 최신 버전으로 로컬 캐시에 저장되어 있다면, [`ModelMixin.from_pretrained`]는 굳이 해당 파일들을 다시 다운로드하지 않으며, 그저 캐시에 있는 최신 파일들을 재사용합니다. 모델은 `subfolder` 인자에 명시된 하위 폴더로부터 로드됩니다. 예를 들어 `runwayml/stable-diffusion-v1-5`의 UNet 모델의 가중치는 [`unet`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/unet) 폴더에 저장되어 있습니다. ```python from diffusers import UNet2DConditionModel repo_id = "runwayml/stable-diffusion-v1-5" model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet") ``` 혹은 [해당 모델의 리포지토리](https://huggingface.co/google/ddpm-cifar10-32/tree/main)로부터 다이렉트로 가져오는 것 역시 가능합니다. ```python from diffusers import UNet2DModel repo_id = "google/ddpm-cifar10-32" model = UNet2DModel.from_pretrained(repo_id) ``` 또한 앞서 봤던 `variant` 인자를 명시함으로써, Non-EMA나 `fp16`의 가중치를 가져오는 것 역시 가능합니다. ```python from diffusers import UNet2DConditionModel model = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", variant="non-ema") model.save_pretrained("./local-unet", variant="non-ema") ``` ### 스케줄러 스케줄러들은 [`SchedulerMixin.from_pretrained`] 메서드를 통해 불러올 수 있습니다. 
모델과 달리 스케줄러는 별도의 가중치를 갖지 않으며, 따라서 당연히 별도의 학습과정을 요구하지 않습니다. 이러한 스케줄러들은 (해당 스케줄러 하위폴더의) configration 파일을 통해 정의됩니다. 여러개의 스케줄러를 불러온다고 해서 많은 메모리를 소모하는 것은 아니며, 다양한 스케줄러들에 동일한 스케줄러 configration을 적용하는 것 역시 가능합니다. 다음 예시 코드에서 불러오는 스케줄러들은 모두 [`StableDiffusionPipeline`]과 호환되는데, 이는 곧 해당 스케줄러들에 동일한 스케줄러 configration 파일을 적용할 수 있음을 의미합니다. ```python from diffusers import StableDiffusionPipeline from diffusers import ( DDPMScheduler, DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ) repo_id = "runwayml/stable-diffusion-v1-5" ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler") ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler") pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler") lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler") # replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler` pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm) ``` ### DiffusionPipeline에 대해 알아보기 클래스 메서드로서 [`DiffusionPipeline.from_pretrained`]은 2가지를 담당합니다. - 첫째로, `from_pretrained` 메서드는 최신 버전의 파이프라인을 다운로드하고, 캐시에 저장합니다. 이미 로컬 캐시에 최신 버전의 파이프라인이 저장되어 있다면, [`DiffusionPipeline.from_pretrained`]은 해당 파일들을 다시 다운로드하지 않고, 로컬 캐시에 저장되어 있는 파이프라인을 불러옵니다. - `model_index.json` 파일을 통해 체크포인트에 대응되는 적합한 파이프라인 클래스로 불러옵니다. 파이프라인의 폴더 구조는 해당 파이프라인 클래스의 구조와 직접적으로 일치합니다. 예를 들어 [`StableDiffusionPipeline`] 클래스는 [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) 리포지토리와 대응되는 구조를 갖습니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(repo_id) print(pipeline) ``` 위의 코드 출력 결과를 확인해보면, `pipeline`은 [`StableDiffusionPipeline`]의 인스턴스이며, 다음과 같이 총 7개의 컴포넌트로 구성된다는 것을 알 수 있습니다. - `"feature_extractor"`: [`~transformers.CLIPFeatureExtractor`]의 인스턴스 - `"safety_checker"`: 유해한 컨텐츠를 스크리닝하기 위한 [컴포넌트](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) - `"scheduler"`: [`PNDMScheduler`]의 인스턴스 - `"text_encoder"`: [`~transformers.CLIPTextModel`]의 인스턴스 - `"tokenizer"`: a [`~transformers.CLIPTokenizer`]의 인스턴스 - `"unet"`: [`UNet2DConditionModel`]의 인스턴스 - `"vae"` [`AutoencoderKL`]의 인스턴스 ```json StableDiffusionPipeline { "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` 파이프라인 인스턴스의 컴포넌트들을 [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)의 폴더 구조와 비교해볼 경우, 각각의 컴포넌트마다 별도의 폴더가 있음을 확인할 수 있습니다. ``` . 
├── feature_extractor │ └── preprocessor_config.json ├── model_index.json ├── safety_checker │ ├── config.json │ └── pytorch_model.bin ├── scheduler │ └── scheduler_config.json ├── text_encoder │ ├── config.json │ └── pytorch_model.bin ├── tokenizer │ ├── merges.txt │ ├── special_tokens_map.json │ ├── tokenizer_config.json │ └── vocab.json ├── unet │ ├── config.json │ ├── diffusion_pytorch_model.bin └── vae ├── config.json ├── diffusion_pytorch_model.bin ``` 또한 각각의 컴포넌트들을 파이프라인 인스턴스의 속성으로써 참조할 수 있습니다. ```py pipeline.tokenizer ``` ```python CLIPTokenizer( name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", vocab_size=49408, model_max_length=77, is_fast=False, padding_side="right", truncation_side="right", special_tokens={ "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "pad_token": "<|endoftext|>", }, ) ``` 모든 파이프라인은 `model_index.json` 파일을 통해 [`DiffusionPipeline`]에 다음과 같은 정보를 전달합니다. - `_class_name` 는 어떤 파이프라인 클래스를 사용해야 하는지에 대해 알려줍니다. - `_diffusers_version`는 어떤 버전의 디퓨저스로 파이프라인 안의 모델들이 만들어졌는지를 알려줍니다. - 그 다음은 각각의 컴포넌트들이 어떤 라이브러리의 어떤 클래스로 만들어졌는지에 대해 알려줍니다. (아래 예시에서 `"feature_extractor" : ["transformers", "CLIPImageProcessor"]`의 경우, `feature_extractor` 컴포넌트는 `transformers` 라이브러리의 `CLIPImageProcessor` 클래스를 통해 만들어졌다는 것을 의미합니다.) ```json { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.6.0", "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ```
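참고로, 이 `model_index.json`에 정의된 컴포넌트 구성은 불러온 파이프라인 인스턴스에서도 간단히 확인할 수 있습니다. 아래는 앞서 소개한 `components` 속성을 사용해 각 컴포넌트의 이름과 클래스를 출력해보는 간단한 예시입니다.

```python
from diffusers import DiffusionPipeline

repo_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(repo_id)

# 각 컴포넌트의 이름과 해당 클래스 이름을 출력합니다
for name, component in pipeline.components.items():
    print(name, type(component).__name__)
```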
diffusers/docs/source/ko/using-diffusers/loading.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/loading.md", "repo_id": "diffusers", "token_count": 14650 }
105
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Tour rápido Modelos de difusão são treinados para remover o ruído Gaussiano aleatório passo a passo para gerar uma amostra de interesse, como uma imagem ou áudio. Isso despertou um tremendo interesse em IA generativa, e você provavelmente já viu exemplos de imagens geradas por difusão na internet. 🧨 Diffusers é uma biblioteca que visa tornar os modelos de difusão amplamente acessíveis a todos. Seja você um desenvolvedor ou um usuário, esse tour rápido irá introduzir você ao 🧨 Diffusers e ajudar você a começar a gerar rapidamente! Há três componentes principais da biblioteca para conhecer: - O [`DiffusionPipeline`] é uma classe de alto nível de ponta a ponta desenhada para gerar rapidamente amostras de modelos de difusão pré-treinados para inferência. - [Modelos](./api/models) pré-treinados populares e módulos que podem ser usados como blocos de construção para criar sistemas de difusão. - Vários [Agendadores](./api/schedulers/overview) diferentes - algoritmos que controlam como o ruído é adicionado para treinamento, e como gerar imagens sem o ruído durante a inferência. Esse tour rápido mostrará como usar o [`DiffusionPipeline`] para inferência, e então mostrará como combinar um modelo e um agendador para replicar o que está acontecendo dentro do [`DiffusionPipeline`]. <Tip> Esse tour rápido é uma versão simplificada da introdução 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) para ajudar você a começar rápido. Se você quer aprender mais sobre o objetivo do 🧨 Diffusers, filosofia de design, e detalhes adicionais sobre a API principal, veja o notebook! </Tip> Antes de começar, certifique-se de ter todas as bibliotecas necessárias instaladas: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) acelera o carregamento do modelo para geração e treinamento. - [🤗 Transformers](https://huggingface.co/docs/transformers/index) é necessário para executar os modelos mais populares de difusão, como o [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview). ## DiffusionPipeline O [`DiffusionPipeline`] é a forma mais fácil de usar um sistema de difusão pré-treinado para geração. É um sistema de ponta a ponta contendo o modelo e o agendador. Você pode usar o [`DiffusionPipeline`] pronto para muitas tarefas. Dê uma olhada na tabela abaixo para algumas tarefas suportadas, e para uma lista completa de tarefas suportadas, veja a tabela [Resumo do 🧨 Diffusers](./api/pipelines/overview#diffusers-summary). 
| **Tarefa** | **Descrição** | **Pipeline** | | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | | Unconditional Image Generation | gera uma imagem a partir do ruído Gaussiano | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | gera uma imagem a partir de um prompt de texto | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | adapta uma imagem guiada por um prompt de texto | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | preenche a parte da máscara da imagem, dado a imagem, a máscara e o prompt de texto | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | adapta as partes de uma imagem guiada por um prompt de texto enquanto preserva a estrutura por estimativa de profundidade | [depth2img](./using-diffusers/depth2img) | Comece criando uma instância do [`DiffusionPipeline`] e especifique qual checkpoint do pipeline você gostaria de baixar. Você pode usar o [`DiffusionPipeline`] para qualquer [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) armazenado no Hugging Face Hub. Nesse quicktour, você carregará o checkpoint [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) para geração de texto para imagem. <Tip warning={true}> Para os modelos de [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion), por favor leia cuidadosamente a [licença](https://huggingface.co/spaces/CompVis/stable-diffusion-license) primeiro antes de rodar o modelo. 🧨 Diffusers implementa uma verificação de segurança: [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) para prevenir conteúdo ofensivo ou nocivo, mas as capacidades de geração de imagem aprimorada do modelo podem ainda produzir conteúdo potencialmente nocivo. </Tip> Para carregar o modelo com o método [`~DiffusionPipeline.from_pretrained`]: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) ``` O [`DiffusionPipeline`] baixa e armazena em cache todos os componentes de modelagem, tokenização, e agendamento. Você verá que o pipeline do Stable Diffusion é composto pelo [`UNet2DConditionModel`] e [`PNDMScheduler`] entre outras coisas: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` Nós fortemente recomendamos rodar o pipeline em uma placa de vídeo, pois o modelo consiste em aproximadamente 1.4 bilhões de parâmetros. Você pode mover o objeto gerador para uma placa de vídeo, assim como você faria no PyTorch: ```python >>> pipeline.to("cuda") ``` Agora você pode passar o prompt de texto para o `pipeline` para gerar uma imagem, e então acessar a imagem sem ruído. Por padrão, a saída da imagem é embrulhada em um objeto [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class). 
```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> Salve a imagem chamando o `save`: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### Pipeline local Você também pode utilizar o pipeline localmente. A única diferença é que você precisa baixar os pesos primeiro: ```bash !git lfs install !git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` Assim carregue os pesos salvos no pipeline: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` Agora você pode rodar o pipeline como você faria na seção acima. ### Troca dos agendadores Agendadores diferentes tem diferentes velocidades de retirar o ruído e compensações de qualidade. A melhor forma de descobrir qual funciona melhor para você é testar eles! Uma das principais características do 🧨 Diffusers é permitir que você troque facilmente entre agendadores. Por exemplo, para substituir o [`PNDMScheduler`] padrão com o [`EulerDiscreteScheduler`], carregue ele com o método [`~diffusers.ConfigMixin.from_config`]: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` Tente gerar uma imagem com o novo agendador e veja se você nota alguma diferença! Na próxima seção, você irá dar uma olhada mais de perto nos componentes - o modelo e o agendador - que compõe o [`DiffusionPipeline`] e aprender como usar esses componentes para gerar uma imagem de um gato. ## Modelos A maioria dos modelos recebe uma amostra de ruído, e em cada _timestep_ ele prevê o _noise residual_ (outros modelos aprendem a prever a amostra anterior diretamente ou a velocidade ou [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)), a diferença entre uma imagem menos com ruído e a imagem de entrada. Você pode misturar e combinar modelos para criar outros sistemas de difusão. Modelos são inicializados com o método [`~ModelMixin.from_pretrained`] que também armazena em cache localmente os pesos do modelo para que seja mais rápido na próxima vez que você carregar o modelo. Para o tour rápido, você irá carregar o [`UNet2DModel`], um modelo básico de geração de imagem incondicional com um checkpoint treinado em imagens de gato: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` Para acessar os parâmetros do modelo, chame `model.config`: ```py >>> model.config ``` A configuração do modelo é um dicionário 🧊 congelado 🧊, o que significa que esses parâmetros não podem ser mudados depois que o modelo é criado. Isso é intencional e garante que os parâmetros usados para definir a arquitetura do modelo no início permaneçam os mesmos, enquanto outros parâmetros ainda podem ser ajustados durante a geração. Um dos parâmetros mais importantes são: - `sample_size`: a dimensão da altura e largura da amostra de entrada. - `in_channels`: o número de canais de entrada da amostra de entrada. - `down_block_types` e `up_block_types`: o tipo de blocos de downsampling e upsampling usados para criar a arquitetura UNet. 
- `block_out_channels`: o número de canais de saída dos blocos de downsampling; também utilizado como uma order reversa do número de canais de entrada dos blocos de upsampling. - `layers_per_block`: o número de blocks ResNet presentes em cada block UNet. Para usar o modelo para geração, crie a forma da imagem com ruído Gaussiano aleatório. Deve ter um eixo `batch` porque o modelo pode receber múltiplos ruídos aleatórios, um eixo `channel` correspondente ao número de canais de entrada, e um eixo `sample_size` para a altura e largura da imagem: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` Para geração, passe a imagem com ruído para o modelo e um `timestep`. O `timestep` indica o quão ruidosa a imagem de entrada é, com mais ruído no início e menos no final. Isso ajuda o modelo a determinar sua posição no processo de difusão, se está mais perto do início ou do final. Use o método `sample` para obter a saída do modelo: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` Para geração de exemplos reais, você precisará de um agendador para guiar o processo de retirada do ruído. Na próxima seção, você irá aprender como acoplar um modelo com um agendador. ## Agendadores Agendadores gerenciam a retirada do ruído de uma amostra ruidosa para uma amostra menos ruidosa dado a saída do modelo - nesse caso, é o `noisy_residual`. <Tip> 🧨 Diffusers é uma caixa de ferramentas para construir sistemas de difusão. Enquanto o [`DiffusionPipeline`] é uma forma conveniente de começar com um sistema de difusão pré-construído, você também pode escolher seus próprios modelos e agendadores separadamente para construir um sistema de difusão personalizado. </Tip> Para o tour rápido, você irá instanciar o [`DDPMScheduler`] com o método [`~diffusers.ConfigMixin.from_config`]: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 Perceba como o agendador é instanciado de uma configuração. Diferentemente de um modelo, um agendador não tem pesos treináveis e é livre de parâmetros! </Tip> Um dos parâmetros mais importante são: - `num_train_timesteps`: o tamanho do processo de retirar ruído ou em outras palavras, o número de _timesteps_ necessários para o processo de ruídos Gausianos aleatórios dentro de uma amostra de dados. - `beta_schedule`: o tipo de agendados de ruído para o uso de geração e treinamento. - `beta_start` e `beta_end`: para começar e terminar os valores de ruído para o agendador de ruído. Para predizer uma imagem com um pouco menos de ruído, passe o seguinte para o método do agendador [`~diffusers.DDPMScheduler.step`]: saída do modelo, `timestep`, e a atual `amostra`. ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` O `less_noisy_sample` pode ser passado para o próximo `timestep` onde ele ficará ainda com menos ruído! Vamos juntar tudo agora e visualizar o processo inteiro de retirada de ruído. 
Comece, criando a função que faça o pós-processamento e mostre a imagem sem ruído como uma `PIL.Image`: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` Para acelerar o processo de retirada de ruído, mova a entrada e o modelo para uma GPU: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` Agora, crie um loop de retirada de ruído que prediz o residual da amostra menos ruidosa, e computa a amostra menos ruidosa com o agendador: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` Sente-se e assista o gato ser gerado do nada além de ruído! 😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## Próximos passos Esperamos que você tenha gerado algumas imagens legais com o 🧨 Diffusers neste tour rápido! Para suas próximas etapas, você pode - Treine ou faça a configuração fina de um modelo para gerar suas próprias imagens no tutorial de [treinamento](./tutorials/basic_training). - Veja exemplos oficiais e da comunidade de [scripts de treinamento ou configuração fina](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) para os mais variados casos de uso. - Aprenda sobre como carregar, acessar, mudar e comparar agendadores no guia [Usando diferentes agendadores](./using-diffusers/schedulers). - Explore engenharia de prompt, otimizações de velocidade e memória, e dicas e truques para gerar imagens de maior qualidade com o guia [Stable Diffusion](./stable_diffusion). - Se aprofunde em acelerar 🧨 Diffusers com guias sobre [PyTorch otimizado em uma GPU](./optimization/fp16), e guias de inferência para rodar [Stable Diffusion em Apple Silicon (M1/M2)](./optimization/mps) e [ONNX Runtime](./optimization/onnx).
diffusers/docs/source/pt/quicktour.md/0
{ "file_path": "diffusers/docs/source/pt/quicktour.md", "repo_id": "diffusers", "token_count": 6766 }
106
# -*- coding: utf-8 -*- import inspect from typing import Optional, Union import numpy as np import PIL.Image import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import PIL_INTERPOLATION from diffusers.utils.torch_utils import randn_tensor def preprocess(image, w, h): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def slerp(t, v0, v1, DOT_THRESHOLD=0.9995): if not isinstance(v0, np.ndarray): inputs_are_torch = True input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) if np.abs(dot) > DOT_THRESHOLD: v2 = (1 - t) * v0 + t * v1 else: theta_0 = np.arccos(dot) sin_theta_0 = np.sin(theta_0) theta_t = theta_0 * t sin_theta_t = np.sin(theta_t) s0 = np.sin(theta_0 - theta_t) / sin_theta_0 s1 = sin_theta_t / sin_theta_0 v2 = s0 * v0 + s1 * v1 if inputs_are_torch: v2 = torch.from_numpy(v2).to(input_device) return v2 def spherical_dist_loss(x, y): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def set_requires_grad(model, value): for param in model.parameters(): param.requires_grad = value class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, ) self.feature_extractor_size = ( feature_extractor.size if isinstance(feature_extractor.size, int) else feature_extractor.size["shortest_edge"] ) self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) set_requires_grad(self.text_encoder, False) set_requires_grad(self.clip_model, False) def freeze_vae(self): set_requires_grad(self.vae, False) def unfreeze_vae(self): set_requires_grad(self.vae, True) def freeze_unet(self): set_requires_grad(self.unet, False) def unfreeze_unet(self): set_requires_grad(self.unet, True) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), 
num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None): if not isinstance(image, torch.Tensor): raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}") image = image.to(device=device, dtype=dtype) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor init_latents = 0.18215 * init_latents init_latents = init_latents.repeat_interleave(batch_size, dim=0) noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def get_image_description(self, image): transformed_image = self.coca_transform(image).unsqueeze(0) with torch.no_grad(), torch.cuda.amp.autocast(): generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype)) generated = self.coca_tokenizer.decode(generated[0].cpu().numpy()) return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,") def get_clip_image_embeddings(self, image, batch_size): clip_image_input = self.feature_extractor.preprocess(image) clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half() image_embeddings_clip = self.clip_model.get_image_features(clip_image_features) image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0) return image_embeddings_clip @torch.enable_grad() def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ): latents = latents.detach().requires_grad_() latent_model_input = self.scheduler.scale_model_input(latents, timestep) # predict the noise residual noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) fac = torch.sqrt(beta_prod_t) sample = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, LMSDiscreteScheduler): sigma = self.scheduler.sigmas[index] sample = latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler)} not supported") # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor sample = 1 / 0.18215 * sample image = self.vae.decode(sample).sample image = (image / 2 + 0.5).clamp(0, 1) image = transforms.Resize(self.feature_extractor_size)(image) image = self.normalize(image).to(latents.dtype) image_embeddings_clip = self.clip_model.get_image_features(image) image_embeddings_clip = 
image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale grads = -torch.autograd.grad(loss, latents)[0] if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents.detach() + grads * (sigma**2) noise_pred = noise_pred_original else: noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads return noise_pred, latents @torch.no_grad() def __call__( self, style_image: Union[torch.FloatTensor, PIL.Image.Image], content_image: Union[torch.FloatTensor, PIL.Image.Image], style_prompt: Optional[str] = None, content_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if isinstance(generator, torch.Generator) and batch_size > 1: generator = [generator] + [None] * (batch_size - 1) coca_is_none = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] coca_is_none = [x[0] for x in coca_is_none if x[1]] coca_is_none_str = ", ".join(coca_is_none) # generate prompts with coca model if prompt is None if content_prompt is None: if len(coca_is_none): raise ValueError( f"Content prompt is None and CoCa [{coca_is_none_str}] is None." f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) content_prompt = self.get_image_description(content_image) if style_prompt is None: if len(coca_is_none): raise ValueError( f"Style prompt is None and CoCa [{coca_is_none_str}] is None." f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." 
) style_prompt = self.get_image_description(style_image) # get prompt text embeddings for content and style content_text_input = self.tokenizer( content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0] style_text_input = self.tokenizer( style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0] text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings) # duplicate text embeddings for each generation per prompt text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0) # set timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device) latent_timestep = timesteps[:1].repeat(batch_size) # Preprocess image preprocessed_content_image = preprocess(content_image, width, height) content_latents = self.prepare_latents( preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator ) preprocessed_style_image = preprocess(style_image, width, height) style_latents = self.prepare_latents( preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator ) latents = slerp(slerp_latent_style_strength, content_latents, style_latents) if clip_guidance_scale > 0: content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size) style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size) clip_image_embeddings = slerp( slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: max_length = content_text_input.input_ids.shape[-1] uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt") uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform classifier free guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: text_embeddings_for_guidance = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) noise_pred, latents = self.cond_fn( latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample progress_bar.update() # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
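

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the pipeline implementation above.
    # The checkpoint names ("CompVis/stable-diffusion-v1-4",
    # "laion/CLIP-ViT-B-32-laion2B-s34B-b79K") and the image paths "content.png" /
    # "style.png" are placeholders chosen for demonstration; swap in the models and
    # images you actually want to mix. Loading goes through the `custom_pipeline`
    # mechanism so the extra `clip_model` / `feature_extractor` components are wired
    # into this class.
    clip_model = CLIPModel.from_pretrained(
        "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
    )
    feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")

    pipeline = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="clip_guided_images_mixing_stable_diffusion",
        clip_model=clip_model,
        feature_extractor=feature_extractor,
        torch_dtype=torch.float16,
    ).to("cuda")

    content_image = PIL.Image.open("content.png").convert("RGB")
    style_image = PIL.Image.open("style.png").convert("RGB")

    result = pipeline(
        content_image=content_image,
        style_image=style_image,
        content_prompt="a photo of a house by a lake",
        style_prompt="an oil painting with heavy brush strokes",
        num_inference_steps=50,
        clip_guidance_scale=100,
        generator=torch.Generator(device="cuda").manual_seed(0),
    )
    result.images[0].save("mixed.png")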
diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py", "repo_id": "diffusers", "token_count": 8767 }
107
import inspect import os import numpy as np import torch import torch.nn.functional as nnf from PIL import Image from torch.optim.adam import Adam from tqdm import tqdm from diffusers import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput def retrieve_timesteps( scheduler, num_inference_steps=None, device=None, timesteps=None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class NullTextPipeline(StableDiffusionPipeline): def get_noise_pred(self, latents, t, context): latents_input = torch.cat([latents] * 2) guidance_scale = 7.5 noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"] noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) latents = self.prev_step(noise_pred, t, latents) return latents def get_noise_pred_single(self, latents, t, context): noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"] return noise_pred @torch.no_grad() def image2latent(self, image_path): image = Image.open(image_path).convert("RGB") image = np.array(image) image = torch.from_numpy(image).float() / 127.5 - 1 image = image.permute(2, 0, 1).unsqueeze(0).to(self.device) latents = self.vae.encode(image)["latent_dist"].mean latents = latents * 0.18215 return latents @torch.no_grad() def latent2image(self, latents): latents = 1 / 0.18215 * latents.detach() image = self.vae.decode(latents)["sample"].detach() image = self.processor.postprocess(image, output_type="pil")[0] return image def prev_step(self, model_output, timestep, sample): prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps alpha_prod_t = self.scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t 
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction return prev_sample def next_step(self, model_output, timestep, sample): timestep, next_timestep = ( min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999), timestep, ) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep] beta_prod_t = 1 - alpha_prod_t next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction return next_sample def null_optimization(self, latents, context, num_inner_steps, epsilon): uncond_embeddings, cond_embeddings = context.chunk(2) uncond_embeddings_list = [] latent_cur = latents[-1] bar = tqdm(total=num_inner_steps * self.num_inference_steps) for i in range(self.num_inference_steps): uncond_embeddings = uncond_embeddings.clone().detach() uncond_embeddings.requires_grad = True optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0)) latent_prev = latents[len(latents) - i - 2] t = self.scheduler.timesteps[i] with torch.no_grad(): noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings) for j in range(num_inner_steps): noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings) noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond) latents_prev_rec = self.prev_step(noise_pred, t, latent_cur) loss = nnf.mse_loss(latents_prev_rec, latent_prev) optimizer.zero_grad() loss.backward() optimizer.step() loss_item = loss.item() bar.update() if loss_item < epsilon + i * 2e-5: break for j in range(j + 1, num_inner_steps): bar.update() uncond_embeddings_list.append(uncond_embeddings[:1].detach()) with torch.no_grad(): context = torch.cat([uncond_embeddings, cond_embeddings]) latent_cur = self.get_noise_pred(latent_cur, t, context) bar.close() return uncond_embeddings_list @torch.no_grad() def ddim_inversion_loop(self, latent, context): self.scheduler.set_timesteps(self.num_inference_steps) _, cond_embeddings = context.chunk(2) all_latent = [latent] latent = latent.clone().detach() with torch.no_grad(): for i in range(0, self.num_inference_steps): t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1] noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"] latent = self.next_step(noise_pred, t, latent) all_latent.append(latent) return all_latent def get_context(self, prompt): uncond_input = self.tokenizer( [""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt" ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] text_input = self.tokenizer( [prompt], padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] context = torch.cat([uncond_embeddings, text_embeddings]) return context def invert( self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50 ): self.num_inference_steps = num_inference_steps context = self.get_context(prompt) latent = self.image2latent(image_path) 
ddim_latents = self.ddim_inversion_loop(latent, context) if os.path.exists(image_path + ".pt"): uncond_embeddings = torch.load(image_path + ".pt") else: uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon) uncond_embeddings = torch.stack(uncond_embeddings, 0) torch.save(uncond_embeddings, image_path + ".pt") return ddim_latents[-1], uncond_embeddings @torch.no_grad() def __call__( self, prompt, uncond_embeddings, inverted_latent, num_inference_steps: int = 50, timesteps=None, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, generator=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None, output_type="pil", ): self._guidance_scale = guidance_scale # 0. Default height and width to unet height = self.unet.config.sample_size * self.vae_scale_factor width = self.unet.config.sample_size * self.vae_scale_factor # to deal with lora scaling and other possible forward hook callback_steps = None # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameter device = self._execution_device # 3. Encode input prompt prompt_embeds, _ = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) latents = inverted_latent with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"] noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] progress_bar.update() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] else: image = latents image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=[True] * image.shape[0] ) # Offload all models self.maybe_free_model_hooks() return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
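

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the pipeline implementation above. The
    # checkpoint name, the input image path "input.png", and the prompt are placeholders
    # chosen for demonstration. The scheduler below mirrors the usual Stable Diffusion
    # DDIM settings; null-text optimization is typically run in float32 for numerical
    # stability.
    from diffusers import DDIMScheduler

    scheduler = DDIMScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
    )
    pipeline = NullTextPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", scheduler=scheduler, torch_dtype=torch.float32
    ).to("cuda")

    invert_prompt = "a photo of a cat sitting on a bench"
    steps = 50

    # 1. DDIM-invert the input image and optimize the per-step null-text embeddings.
    inverted_latent, uncond_embeddings = pipeline.invert(
        "input.png", invert_prompt, num_inner_steps=10, early_stop_epsilon=1e-5, num_inference_steps=steps
    )

    # 2. Regenerate (or edit, by changing the prompt) from the inverted latent using the
    #    optimized null-text embeddings.
    image = pipeline(
        invert_prompt, uncond_embeddings, inverted_latent, guidance_scale=7.5, num_inference_steps=steps
    ).images[0]
    image.save("reconstruction.png")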
diffusers/examples/community/pipeline_null_text_inversion.py/0
{ "file_path": "diffusers/examples/community/pipeline_null_text_inversion.py", "repo_id": "diffusers", "token_count": 5423 }
108
from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class TextInpainting(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text based inpainting using Stable Diffusion. Uses CLIPSeg to get a mask from the given text, then calls the Inpainting pipeline with the generated mask This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: segmentation_model ([`CLIPSegForImageSegmentation`]): CLIPSeg Model to generate mask from the given text. Please refer to the [model card]() for details. segmentation_processor ([`CLIPSegProcessor`]): CLIPSeg processor to get image, text features to translate prompt to English, if necessary. Please refer to the [model card](https://huggingface.co/docs/transformers/model_doc/clipseg) for details. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ def __init__( self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. 
`steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["skip_prk_steps"] = True scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. text (`str``): The text to use to generate the mask. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" # We use the input text to generate the mask inputs = self.segmentation_processor( text=[text], images=[image], padding="max_length", return_tensors="pt" ).to(self.device) outputs = self.segmentation_model(**inputs) mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy() mask_pil = self.numpy_to_pil(mask)[0].resize(image.size) # Run inpainting pipeline with the generated mask inpainting_pipeline = StableDiffusionInpaintPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, ) return inpainting_pipeline( prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
diffusers/examples/community/text_inpainting.py/0
{ "file_path": "diffusers/examples/community/text_inpainting.py", "repo_id": "diffusers", "token_count": 5469 }
109
# ControlNet training example for Stable Diffusion XL (SDXL) The `train_controlnet_sdxl.py` script shows how to implement the ControlNet training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the `examples/controlnet` folder and run ```bash pip install -r requirements_sdxl.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell (e.g., a notebook) ```python from accelerate.utils import write_basic_config write_basic_config() ``` When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. ## Circle filling dataset The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script. ## Training Our training examples use two test conditioning images. They can be downloaded by running ```sh wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` Then run `huggingface-cli login` to log into your Hugging Face account. This is needed to be able to push the trained ControlNet parameters to Hugging Face Hub. ```bash export MODEL_DIR="stabilityai/stable-diffusion-xl-base-1.0" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet_sdxl.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --mixed_precision="fp16" \ --resolution=1024 \ --learning_rate=1e-5 \ --max_train_steps=15000 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --validation_steps=100 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --report_to="wandb" \ --seed=42 \ --push_to_hub ``` To better track our training experiments, we're using the following flags in the command above: * `report_to="wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`. * `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected. Our experiments were conducted on a single 40GB A100 GPU. 
### Inference Once training is done, we can perform inference like so: ```python from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler from diffusers.utils import load_image import torch base_model_path = "stabilityai/stable-diffusion-xl-base-1.0" controlnet_path = "path to controlnet" controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16) pipe = StableDiffusionXLControlNetPipeline.from_pretrained( base_model_path, controlnet=controlnet, torch_dtype=torch.float16 ) # speed up diffusion process with faster scheduler and memory optimization pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) # remove following line if xformers is not installed or when using Torch 2.0. pipe.enable_xformers_memory_efficient_attention() # memory optimization. pipe.enable_model_cpu_offload() control_image = load_image("./conditioning_image_1.png").resize((1024, 1024)) prompt = "pale golden rod circle with old lace background" # generate image generator = torch.manual_seed(0) image = pipe( prompt, num_inference_steps=20, generator=generator, image=control_image ).images[0] image.save("./output.png") ``` ## Notes ### Specifying a better VAE SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of an alternative VAE (such as [`madebyollin/sdxl-vae-fp16-fix`](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)). If you're using this VAE during training, you need to ensure you're using it during inference too. You do so by: ```diff + vae = AutoencoderKL.from_pretrained(vae_path_or_repo_id, torch_dtype=torch.float16) controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16) pipe = StableDiffusionXLControlNetPipeline.from_pretrained( base_model_path, controlnet=controlnet, torch_dtype=torch.float16, + vae=vae, )
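```

For completeness, a full inference sketch with the alternative VAE wired in might look like the following (reusing the placeholder `controlnet_path` from above and `madebyollin/sdxl-vae-fp16-fix` as the example VAE):

```python
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline
import torch

controlnet_path = "path to controlnet"

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()
```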
diffusers/examples/controlnet/README_sdxl.md/0
{ "file_path": "diffusers/examples/controlnet/README_sdxl.md", "repo_id": "diffusers", "token_count": 1700 }
110
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to fine-tune Stable Diffusion for InstructPix2Pix.""" import argparse import logging import math import os import shutil from pathlib import Path import accelerate import datasets import numpy as np import PIL import requests import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionInstructPix2PixPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel from diffusers.utils import check_min_version, deprecate, is_wandb_available from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "fusing/instructpix2pix-1000-samples": ("input_image", "edit_prompt", "edited_image"), } WANDB_TABLE_COL_NAMES = ["original_image", "edited_image", "edit_prompt"] def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script for InstructPix2Pix.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. 
In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--original_image_column", type=str, default="input_image", help="The column of the dataset containing the original image on which edits where made.", ) parser.add_argument( "--edited_image_column", type=str, default="edited_image", help="The column of the dataset containing the edited image.", ) parser.add_argument( "--edit_prompt_column", type=str, default="edit_prompt", help="The column of the dataset containing the edit instruction.", ) parser.add_argument( "--val_image_url", type=str, default=None, help="URL to the original image that you would like to edit (used during inference for debugging purposes).", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference." ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=1, help=( "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--output_dir", type=str, default="instruct-pix2pix-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=256, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--conditioning_dropout_prob", type=float, default=None, help="Conditioning dropout probability. Drops out the conditionings (image and edit prompt) used in training InstructPix2Pix. See section 3.2.1 in the paper: https://arxiv.org/abs/2211.09800.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def convert_to_np(image, resolution): image = image.convert("RGB").resize((resolution, resolution)) return np.array(image).transpose(2, 0, 1) def download_image(url): image = PIL.Image.open(requests.get(url, stream=True).raw) image = PIL.ImageOps.exif_transpose(image) image = image.convert("RGB") return image def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") import wandb # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision ) # InstructPix2Pix uses an additional image for conditioning. To accommodate that, # it uses 8 channels (instead of 4) in the first (conv) layer of the UNet. This UNet is # then fine-tuned on the custom InstructPix2Pix dataset. This modified UNet is initialized # from the pre-trained checkpoints. For the extra channels added to the first layer, they are # initialized to zero. logger.info("Initializing the InstructPix2Pix UNet from the pretrained UNet.") in_channels = 8 out_channels = unet.conv_in.out_channels unet.register_to_config(in_channels=in_channels) with torch.no_grad(): new_conv_in = nn.Conv2d( in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding ) new_conv_in.weight.zero_() new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight) unet.conv_in = new_conv_in # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) # Create EMA for the unet. if args.use_ema: ema_unet = EMAModel(unet.parameters(), model_cls=UNet2DConditionModel, model_config=unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/main/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.original_image_column is None: original_image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: original_image_column = args.original_image_column if original_image_column not in column_names: raise ValueError( f"--original_image_column' value '{args.original_image_column}' needs to be one of: {', '.join(column_names)}" ) if args.edit_prompt_column is None: edit_prompt_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: edit_prompt_column = args.edit_prompt_column if edit_prompt_column not in column_names: raise ValueError( f"--edit_prompt_column' value '{args.edit_prompt_column}' needs to be one of: {', '.join(column_names)}" ) if args.edited_image_column is None: edited_image_column = dataset_columns[2] if dataset_columns is not None else column_names[2] else: edited_image_column = args.edited_image_column if edited_image_column not in column_names: raise ValueError( f"--edited_image_column' value '{args.edited_image_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(captions): inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), ] ) def preprocess_images(examples): original_images = np.concatenate( [convert_to_np(image, args.resolution) for image in examples[original_image_column]] ) edited_images = np.concatenate( [convert_to_np(image, args.resolution) for image in examples[edited_image_column]] ) # We need to ensure that the original and the edited images undergo the same # augmentation transforms. images = np.concatenate([original_images, edited_images]) images = torch.tensor(images) images = 2 * (images / 255) - 1 return train_transforms(images) def preprocess_train(examples): # Preprocess images. preprocessed_images = preprocess_images(examples) # Since the original and edited images were concatenated before # applying the transformations, we need to separate them and reshape # them accordingly. original_images, edited_images = preprocessed_images.chunk(2) original_images = original_images.reshape(-1, 3, args.resolution, args.resolution) edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution) # Collate the preprocessed images into the `examples`. examples["original_pixel_values"] = original_images examples["edited_pixel_values"] = edited_images # Preprocess the captions. 
captions = list(examples[edit_prompt_column]) examples["input_ids"] = tokenize_captions(captions) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): original_pixel_values = torch.stack([example["original_pixel_values"] for example in examples]) original_pixel_values = original_pixel_values.to(memory_format=torch.contiguous_format).float() edited_pixel_values = torch.stack([example["edited_pixel_values"] for example in examples]) edited_pixel_values = edited_pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return { "original_pixel_values": original_pixel_values, "edited_pixel_values": edited_pixel_values, "input_ids": input_ids, } # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("instruct-pix2pix", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() train_loss = 0.0 for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # We want to learn the denoising process w.r.t the edited images which # are conditioned on the original image (which was edited) and the edit instruction. # So, first, convert images to latent space. latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning. encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Get the additional image embedding for conditioning. # Instead of getting a diagonal Gaussian here, we simply take the mode. original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode() # Conditioning dropout to support classifier-free guidance during inference. For more details # check out the section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800. if args.conditioning_dropout_prob is not None: random_p = torch.rand(bsz, device=latents.device, generator=generator) # Sample masks for the edit prompts. prompt_mask = random_p < 2 * args.conditioning_dropout_prob prompt_mask = prompt_mask.reshape(bsz, 1, 1) # Final text conditioning. 
null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0] encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states) # Sample masks for the original images. image_mask_dtype = original_image_embeds.dtype image_mask = 1 - ( (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype) * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype) ) image_mask = image_mask.reshape(bsz, 1, 1, 1) # Final image conditioning. original_image_embeds = image_mask * original_image_embeds # Concatenate the `original_image_embeds` with the `noisy_latents`. concatenated_noisy_latents = torch.cat([noisy_latents, original_image_embeds], dim=1) # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(concatenated_noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if ( (args.val_image_url is not None) and (args.validation_prompt is not None) and (epoch % args.validation_epochs == 0) ): logger.info( f"Running validation... 
\n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # create pipeline if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) # The models need unwrapping because for compatibility in distributed training mode. pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=unwrap_model(unet), text_encoder=unwrap_model(text_encoder), vae=unwrap_model(vae), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference original_image = download_image(args.val_image_url) edited_images = [] with torch.autocast( str(accelerator.device).replace(":0", ""), enabled=accelerator.mixed_precision == "fp16" ): for _ in range(args.num_validation_images): edited_images.append( pipeline( args.validation_prompt, image=original_image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7, generator=generator, ).images[0] ) for tracker in accelerator.trackers: if tracker.name == "wandb": wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES) for edited_image in edited_images: wandb_table.add_data( wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt ) tracker.log({"validation": wandb_table}) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) del pipeline torch.cuda.empty_cache() # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=unwrap_model(text_encoder), vae=unwrap_model(vae), unet=unet, revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if args.validation_prompt is not None: edited_images = [] pipeline = pipeline.to(accelerator.device) with torch.autocast(str(accelerator.device).replace(":0", "")): for _ in range(args.num_validation_images): edited_images.append( pipeline( args.validation_prompt, image=original_image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7, generator=generator, ).images[0] ) for tracker in accelerator.trackers: if tracker.name == "wandb": wandb_table = wandb.Table(columns=WANDB_TABLE_COL_NAMES) for edited_image in edited_images: wandb_table.add_data( wandb.Image(original_image), wandb.Image(edited_image), args.validation_prompt ) tracker.log({"test": wandb_table}) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/instruct_pix2pix/train_instruct_pix2pix.py/0
{ "file_path": "diffusers/examples/instruct_pix2pix/train_instruct_pix2pix.py", "repo_id": "diffusers", "token_count": 19582 }
111
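Once the training script above (`train_instruct_pix2pix.py`) finishes, the fine-tuned pipeline saved to `--output_dir` can be loaded with the same `StableDiffusionInstructPix2PixPipeline` that the validation loop already uses. The sketch below is only a minimal, hedged example: the model path, the input image, and the edit instruction are placeholders you would replace with your own, and the guidance values simply mirror the ones used during validation.

```python
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

# Load the pipeline produced by train_instruct_pix2pix.py (the path is a placeholder).
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "path-to-output-dir", torch_dtype=torch.float16
).to("cuda")

# Any RGB image to edit; resizing close to the training resolution tends to work best.
image = Image.open("example.png").convert("RGB")

# image_guidance_scale trades faithfulness to the input image against following the
# edit instruction, matching the values used in the training script's validation loop.
edited = pipe(
    "turn the sky into a sunset",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.5,
    guidance_scale=7,
).images[0]
edited.save("edited.png")
```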
import argparse import itertools import math import os import random from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib from PIL import Image, ImageDraw from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDPMScheduler, StableDiffusionInpaintPipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def prepare_mask_and_masked_image(image, mask): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 mask = np.array(mask.convert("L")) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) masked_image = image * (mask < 0.5) return mask, masked_image # generate random masks def random_mask(im_shape, ratio=1, mask_full_image=False): mask = Image.new("L", im_shape, 0) draw = ImageDraw.Draw(mask) size = (random.randint(0, int(im_shape[0] * ratio)), random.randint(0, int(im_shape[1] * ratio))) # use this to always mask the whole image if mask_full_image: size = (int(im_shape[0] * ratio), int(im_shape[1] * ratio)) limits = (im_shape[0] - size[0] // 2, im_shape[1] - size[1] // 2) center = (random.randint(size[0] // 2, limits[0]), random.randint(size[1] // 2, limits[1])) draw_type = random.randint(0, 1) if draw_type == 0 or mask_full_image: draw.rectangle( (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), fill=255, ) else: draw.ellipse( (center[0] - size[0] // 2, center[1] - size[1] // 2, center[0] + size[0] // 2, center[1] + size[1] // 2), fill=255, ) return mask def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") 
parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If not have enough images, additional images will be" " sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint and are suitable for resuming training" " using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.instance_data_dir is None: raise ValueError("You must specify a train data directory.") if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. 
""" def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms_resize_and_crop = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), ] ) self.image_transforms = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") instance_image = self.image_transforms_resize_and_crop(instance_image) example["PIL_images"] = instance_image example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") class_image = self.image_transforms_resize_and_crop(class_image) example["class_images"] = self.image_transforms(class_image) example["class_PIL_images"] = class_image example["class_prompt_ids"] = self.tokenizer( self.class_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(): args = parse_args() logging_dir = Path(args.output_dir, args.logging_dir) project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with="tensorboard", project_config=project_config, ) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. 
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) if args.seed is not None: set_seed(args.seed) if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 pipeline = StableDiffusionInpaintPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader( sample_dataset, batch_size=args.sample_batch_size, num_workers=1 ) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) transform_to_pil = transforms.ToPILImage() for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): bsz = len(example["prompt"]) fake_images = torch.rand((3, args.resolution, args.resolution)) transform_to_pil = transforms.ToPILImage() fake_pil_images = transform_to_pil(fake_images) fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True) images = pipeline(prompt=example["prompt"], mask_image=fake_mask, image=fake_pil_images).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder.gradient_checkpointing_enable() if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( 
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if args.with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pior_pil = [example["class_PIL_images"] for example in examples] masks = [] masked_images = [] for example in examples: pil_image = example["PIL_images"] # generate a random mask mask = random_mask(pil_image.size, 1, False) # prepare mask and masked image mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) masks.append(mask) masked_images.append(masked_image) if args.with_prior_preservation: for pil_image in pior_pil: # generate a random mask mask = random_mask(pil_image.size, 1, False) # prepare mask and masked image mask, masked_image = prepare_mask_and_masked_image(pil_image, mask) masks.append(mask) masked_images.append(masked_image) pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids masks = torch.stack(masks) masked_images = torch.stack(masked_images) batch = {"input_ids": input_ids, "pixel_values": pixel_values, "masks": masks, "masked_images": masked_images} return batch train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn ) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) accelerator.register_for_checkpointing(lr_scheduler) weight_dtype = torch.float32 if args.mixed_precision == "fp16": weight_dtype = torch.float16 elif args.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu. # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Convert masked images to latent space masked_latents = vae.encode( batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype) ).latent_dist.sample() masked_latents = masked_latents * vae.config.scaling_factor masks = batch["masks"] # resize the mask to latents shape as we concatenate the mask to the latents mask = torch.stack( [ torch.nn.functional.interpolate(mask, size=(args.resolution // 8, args.resolution // 8)) for mask in masks ] ) mask = mask.reshape(-1, 1, args.resolution // 8, args.resolution // 8) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # concatenate the noised latents with the mask and the masked latents latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() # Compute prior loss prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() # Create the pipeline using using the trained modules and save it. if accelerator.is_main_process: pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py/0
{ "file_path": "diffusers/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py", "repo_id": "diffusers", "token_count": 14370 }
112
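The `random_mask` and `prepare_mask_and_masked_image` helpers defined at the top of `train_dreambooth_inpaint.py` determine what the 9-channel inpainting UNet is conditioned on during training: the noisy image latents, a downsampled mask, and the latents of the masked image. The snippet below is only a hedged sanity-check sketch for inspecting those inputs; it assumes the script can be imported as a module from its own folder and that `instance.jpg` is a placeholder for one of your instance images.

```python
from PIL import Image

# Assumption: the training script above is importable as a module from the same folder.
from train_dreambooth_inpaint import prepare_mask_and_masked_image, random_mask

image = Image.open("instance.jpg").convert("RGB").resize((512, 512))

# random_mask takes the (width, height) of the image and returns a PIL "L" mask
# with a random rectangle or ellipse drawn in white (the region to inpaint).
mask = random_mask(image.size, ratio=1, mask_full_image=False)

# prepare_mask_and_masked_image scales the image to [-1, 1], binarizes the mask,
# and zeroes out the masked region of the image.
mask_tensor, masked_image_tensor = prepare_mask_and_masked_image(image, mask)
print(mask_tensor.shape, masked_image_tensor.shape)  # torch.Size([1, 1, 512, 512]) torch.Size([1, 3, 512, 512])
```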
# Multi Subject DreamBooth training [DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few(3~5) images of a subject. This `train_multi_subject_dreambooth.py` script shows how to implement the training procedure for one or more subjects and adapt it for stable diffusion. Note that this code is based off of the `examples/dreambooth/train_dreambooth.py` script as of 01/06/2022. This script was added by @kopsahlong, and is not actively maintained. However, if you come across anything that could use fixing, feel free to open an issue and tag @kopsahlong. ## Running locally with PyTorch ### Installing the dependencies Before running the script, make sure to install the library's training dependencies: To start, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd into the folder `diffusers/examples/research_projects/multi_subject_dreambooth` and run the following: ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell e.g. a notebook ```python from accelerate.utils import write_basic_config write_basic_config() ``` ### Multi Subject Training Example In order to have your model learn multiple concepts at once, we simply add in the additional data directories and prompts to our `instance_data_dir` and `instance_prompt` (as well as `class_data_dir` and `class_prompt` if `--with_prior_preservation` is specified) as one comma separated string. See an example with 2 subjects below, which learns a model for one dog subject and one human subject: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" # Subject 1 export INSTANCE_DIR_1="path-to-instance-images-concept-1" export INSTANCE_PROMPT_1="a photo of a sks dog" export CLASS_DIR_1="path-to-class-images-dog" export CLASS_PROMPT_1="a photo of a dog" # Subject 2 export INSTANCE_DIR_2="path-to-instance-images-concept-2" export INSTANCE_PROMPT_2="a photo of a t@y person" export CLASS_DIR_2="path-to-class-images-person" export CLASS_PROMPT_2="a photo of a person" accelerate launch train_multi_subject_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir="$INSTANCE_DIR_1,$INSTANCE_DIR_2" \ --output_dir=$OUTPUT_DIR \ --train_text_encoder \ --instance_prompt="$INSTANCE_PROMPT_1,$INSTANCE_PROMPT_2" \ --with_prior_preservation \ --prior_loss_weight=1.0 \ --class_data_dir="$CLASS_DIR_1,$CLASS_DIR_2" \ --class_prompt="$CLASS_PROMPT_1,$CLASS_PROMPT_2"\ --num_class_images=50 \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=1500 ``` This example shows training for 2 subjects, but please note that the model can be trained on any number of new concepts. This can be done by continuing to add in the corresponding directories and prompts to the corresponding comma separated string. Note also that in this script, `sks` and `t@y` were used as tokens to learn the new subjects ([this thread](https://github.com/XavierXiao/Dreambooth-Stable-Diffusion/issues/71) inspired the use of `t@y` as our second identifier). 
However, there may be better rare tokens to experiment with, and results also seemed to be good when more intuitive words are used.

**Important**: New parameters have been added to the script that make it possible to validate training progress by generating images at specified steps. Also, because commas frequently appear inside regular prompt text, passing every prompt through a single comma-separated string is fragile; to avoid this, the script introduces the `--concepts_list` parameter, which lets you point to a JSON-like file defining the configuration for each subject you want to train.

An example of how to generate the file:
```python
import json

# here we are using parameters for prior-preservation and validation as well.
concepts_list = [
    {
        "instance_prompt": "drawing of a t@y meme",
        "class_prompt": "drawing of a meme",
        "instance_data_dir": "/some_folder/meme_toy",
        "class_data_dir": "/data/meme",
        "validation_prompt": "drawing of a t@y meme about football in Uruguay",
        "validation_negative_prompt": "black and white"
    },
    {
        "instance_prompt": "drawing of a sks sir",
        "class_prompt": "drawing of a sir",
        "instance_data_dir": "/some_other_folder/sir_sks",
        "class_data_dir": "/data/sir",
        "validation_prompt": "drawing of a sks sir with the Uruguayan sun in his chest",
        "validation_negative_prompt": "an old man",
        "validation_guidance_scale": 20,
        "validation_number_images": 3,
        "validation_inference_steps": 10
    }
]

with open("concepts_list.json", "w") as f:
    json.dump(concepts_list, f, indent=4)
```

And then just point to the file when executing the script:

```bash
# exports...
accelerate launch train_multi_subject_dreambooth.py \
# more parameters...
--concepts_list="concepts_list.json"
```

You can use the script's help output (`--help`) to get a better sense of each parameter.

### Inference

Once you have trained a model using the above command, you can run inference simply with the `StableDiffusionPipeline`. Make sure to include the identifiers (e.g. `sks` and `t@y` in the above example) in your prompt.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of a t@y person petting an sks dog"
image = pipe(prompt, num_inference_steps=200, guidance_scale=7.5).images[0]

image.save("person-petting-dog.png")
```

### Inference from a training checkpoint

You can also perform inference from one of the checkpoints saved during the training process, if you used the `--checkpointing_steps` argument. Please refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint) to see how to do it.

## Additional Dreambooth documentation

Because the `train_multi_subject_dreambooth.py` script here was forked from an original version of `train_dreambooth.py` in the `examples/dreambooth` folder, I've included the original applicable training documentation for single subject examples below.

This should explain how to play with training variables such as prior preservation, fine tuning the text encoder, etc., which is still applicable to our multi subject training code.

Note also that the examples below, which are single subject examples, also work with `train_multi_subject_dreambooth.py`, as this script supports 1 (or more) subjects.

### Single subject dog toy example

Let's get our dataset.
Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. This will be our training data.

Then launch the training with the command below.

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=400
```

### Training with prior-preservation loss

Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation, we first generate images using the model with a class prompt and then use those during training along with our data.

According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training on a 16GB GPU:

With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to train DreamBooth on a 16GB GPU. To install `bitsandbytes`, please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=2 --gradient_checkpointing \
  --use_8bit_adam \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Training on an 8 GB GPU:

By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some tensors from VRAM to either CPU or NVME, allowing training with less VRAM. DeepSpeed needs to be enabled with `accelerate config`.
During configuration answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16 mixed precision and offloading both parameters and optimizer state to CPU, it's possible to train on under 8 GB of VRAM, with the drawback of requiring significantly more RAM (about 25 GB). See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.

Changing the default Adam optimizer to DeepSpeed's special version of Adam, `deepspeed.ops.adam.DeepSpeedCPUAdam`, gives a substantial speedup, but enabling it requires a CUDA toolchain with the same version as PyTorch. The 8-bit optimizer does not seem to be compatible with DeepSpeed at the moment.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch --mixed_precision="fp16" train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --sample_batch_size=1 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Fine-tune text encoder with the UNet.

The script also allows fine-tuning the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning the `text_encoder` gives much better results, especially on faces. Pass the `--train_text_encoder` argument to the script to enable training the `text_encoder`.

___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_text_encoder \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --use_8bit_adam \
  --gradient_checkpointing \
  --learning_rate=2e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```

### Using DreamBooth for other pipelines than Stable Diffusion

AltDiffusion also supports DreamBooth now; the running command is basically the same as above. All you need to do is replace the `MODEL_NAME`, i.e. simply point `pretrained_model_name_or_path` to another architecture such as [`AltDiffusion`](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion):

```
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9"
or
export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion"
```

### Training with xformers:
You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
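The training-side flag above has an inference-side counterpart: once xFormers is installed, memory-efficient attention can also be enabled on a loaded pipeline. This is only a hedged sketch; the model path is a placeholder, and on recent PyTorch versions the built-in scaled-dot-product attention may already make this unnecessary.

```python
from diffusers import StableDiffusionPipeline

# The path below is a placeholder for your fine-tuned model or a Hub model id.
pipe = StableDiffusionPipeline.from_pretrained("path-to-your-trained-model").to("cuda")

# Requires xFormers to be installed; reduces attention memory use during inference.
pipe.enable_xformers_memory_efficient_attention()
```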
You can also use Dreambooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint).
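Since the sentence above points to the DreamBooth in-painting training script, here is a rough, hedged sketch of how inference with the resulting model could look. It assumes the script was run from an in-painting base checkpoint and that the saved `--output_dir` can be loaded with `StableDiffusionInpaintPipeline`; the paths, prompt, and images are placeholders.

```python
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

# Assumption: "path-to-save-model" is the --output_dir of the in-painting DreamBooth run.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "path-to-save-model", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("scene.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png").convert("L").resize((512, 512))  # white = region to repaint

image = pipe(
    prompt="a photo of sks dog sitting on a bench",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
image.save("inpainted.png")
```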
diffusers/examples/research_projects/multi_subject_dreambooth/README.md/0
{ "file_path": "diffusers/examples/research_projects/multi_subject_dreambooth/README.md", "repo_id": "diffusers", "token_count": 4807 }
113
## Textual Inversion fine-tuning example [Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples. The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion. ## Running on Colab Colab for training [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) Colab for inference [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` ### Cat toy example You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-5`, so you'll need to visit [its card](https://huggingface.co/runwayml/stable-diffusion-v1-5), read the license and tick the checkbox if you agree. You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens). Run the following command to authenticate your token ```bash huggingface-cli login ``` If you have already cloned the repo, then you won't need to go through these steps. <br> Now let's get our dataset. For this example we will use some cat images: https://huggingface.co/datasets/diffusers/cat_toy_example . Let's first download it locally: ```py from huggingface_hub import snapshot_download local_dir = "./cat" snapshot_download("diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes") ``` This will be our training data. 
Now we can launch the training using the command below.

## Use ONNXRuntime to accelerate training

In order to leverage onnxruntime to accelerate training, please use `textual_inversion.py`.

The command to train on custom data with onnxruntime:

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATA_DIR="path-to-dir-containing-images"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat"
```

Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub with any questions.
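Once the run above finishes, the learned concept can be used for inference. The snippet below is a minimal sketch rather than part of the original README; it assumes the full pipeline, including the learned embedding, was written to the `textual_inversion_cat` output directory used above.

```python
import torch
from diffusers import StableDiffusionPipeline

# The training output directory contains the pipeline plus the learned <cat-toy> embedding.
pipe = StableDiffusionPipeline.from_pretrained(
    "textual_inversion_cat", torch_dtype=torch.float16
).to("cuda")

# The placeholder token used during training can now appear in prompts.
image = pipe("A <cat-toy> backpack", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack.png")
```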
diffusers/examples/research_projects/onnxruntime/textual_inversion/README.md/0
{ "file_path": "diffusers/examples/research_projects/onnxruntime/textual_inversion/README.md", "repo_id": "diffusers", "token_count": 1117 }
114
import argparse import copy import itertools import logging import math import os import random import shutil from pathlib import Path import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms.v2 as transforms_v2 import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import create_repo, upload_folder from packaging import version from peft import LoraConfig, PeftModel, get_peft_model from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import Dataset from tqdm.auto import tqdm from transformers import AutoTokenizer, CLIPTextModel import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.20.1") logger = get_logger(__name__) def make_mask(images, resolution, times=30): mask, times = torch.ones_like(images[0:1, :, :]), np.random.randint(1, times) min_size, max_size, margin = np.array([0.03, 0.25, 0.01]) * resolution max_size = min(max_size, resolution - margin * 2) for _ in range(times): width = np.random.randint(int(min_size), int(max_size)) height = np.random.randint(int(min_size), int(max_size)) x_start = np.random.randint(int(margin), resolution - int(margin) - width + 1) y_start = np.random.randint(int(margin), resolution - int(margin) - height + 1) mask[:, y_start : y_start + height, x_start : x_start + width] = 0 mask = 1 - mask if random.random() < 0.5 else mask return mask def save_model_card( repo_id: str, images=None, base_model=str, repo_folder=None, ): img_str = "" for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} prompt: "a photo of sks" tags: - stable-diffusion-inpainting - stable-diffusion-inpainting-diffusers - text-to-image - diffusers - realfill - diffusers-training inference: true --- """ model_card = f""" # RealFill - {repo_id} This is a realfill model derived from {base_model}. The weights were trained using [RealFill](https://realfill.github.io/). You can find some example images in the following. \n {img_str} """ with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def log_validation( text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch, ): logger.info(f"Running validation... 
\nGenerating {args.num_validation_images} images") # create pipeline (note: unet and vae are loaded again in float32) pipeline = StableDiffusionInpaintPipeline.from_pretrained( args.pretrained_model_name_or_path, tokenizer=tokenizer, revision=args.revision, torch_dtype=weight_dtype, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed) target_dir = Path(args.train_data_dir) / "target" target_image, target_mask = target_dir / "target.png", target_dir / "mask.png" image, mask_image = Image.open(target_image), Image.open(target_mask) if image.mode != "RGB": image = image.convert("RGB") images = [] for _ in range(args.num_validation_images): image = pipeline( prompt="a photo of sks", image=image, mask_image=mask_image, num_inference_steps=25, guidance_scale=5, generator=generator, ).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log({"validation": [wandb.Image(image, caption=str(i)) for i, image in enumerate(images)]}) del pipeline torch.cuda.empty_cache() return images def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data of images.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_conditioning`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run realfill validation every X steps. RealFill validation consists of running the conditioning" " `args.validation_conditioning` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--output_dir", type=str, default="realfill-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--unet_learning_rate", type=float, default=2e-4, help="Learning rate to use for unet.", ) parser.add_argument( "--text_encoder_learning_rate", type=float, default=4e-5, help="Learning rate to use for text encoder.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--lora_rank", type=int, default=16, help=("The dimension of the LoRA update matrices."), ) parser.add_argument( "--lora_alpha", type=int, default=27, help=("The alpha constant of the LoRA update matrices."), ) parser.add_argument( "--lora_dropout", type=float, default=0.0, help="The dropout rate of the LoRA update matrices.", ) parser.add_argument( "--lora_bias", type=str, default="none", help="The bias type of the Lora update matrices. Must be 'none', 'all' or 'lora_only'.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank return args class RealFillDataset(Dataset): """ A dataset to prepare the training and conditioning images and the masks with the dummy prompt for fine-tuning the model. It pre-processes the images, masks and tokenizes the prompts. 
""" def __init__( self, train_data_root, tokenizer, size=512, ): self.size = size self.tokenizer = tokenizer self.ref_data_root = Path(train_data_root) / "ref" self.target_image = Path(train_data_root) / "target" / "target.png" self.target_mask = Path(train_data_root) / "target" / "mask.png" if not (self.ref_data_root.exists() and self.target_image.exists() and self.target_mask.exists()): raise ValueError("Train images root doesn't exists.") self.train_images_path = list(self.ref_data_root.iterdir()) + [self.target_image] self.num_train_images = len(self.train_images_path) self.train_prompt = "a photo of sks" self.transform = transforms_v2.Compose( [ transforms_v2.ToImage(), transforms_v2.RandomResize(size, int(1.125 * size)), transforms_v2.RandomCrop(size), transforms_v2.ToDtype(torch.float32, scale=True), transforms_v2.Normalize([0.5], [0.5]), ] ) def __len__(self): return self.num_train_images def __getitem__(self, index): example = {} image = Image.open(self.train_images_path[index]) image = exif_transpose(image) if not image.mode == "RGB": image = image.convert("RGB") if index < len(self) - 1: weighting = Image.new("L", image.size) else: weighting = Image.open(self.target_mask) weighting = exif_transpose(weighting) image, weighting = self.transform(image, weighting) example["images"], example["weightings"] = image, weighting < 0 if random.random() < 0.1: example["masks"] = torch.ones_like(example["images"][0:1, :, :]) else: example["masks"] = make_mask(example["images"], self.size) example["conditioning_images"] = example["images"] * (example["masks"] < 0.5) train_prompt = "" if random.random() < 0.1 else self.train_prompt example["prompt_ids"] = self.tokenizer( train_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(examples): input_ids = [example["prompt_ids"] for example in examples] images = [example["images"] for example in examples] masks = [example["masks"] for example in examples] weightings = [example["weightings"] for example in examples] conditioning_images = [example["conditioning_images"] for example in examples] images = torch.stack(images) images = images.to(memory_format=torch.contiguous_format).float() masks = torch.stack(masks) masks = masks.to(memory_format=torch.contiguous_format).float() weightings = torch.stack(weightings) weightings = weightings.to(memory_format=torch.contiguous_format).float() conditioning_images = torch.stack(conditioning_images) conditioning_images = conditioning_images.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "images": images, "masks": masks, "weightings": weightings, "conditioning_images": conditioning_images, } return batch def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." 
) logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") wandb.login(key=args.wandb_key) wandb.init(project=args.wandb_project_name) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) config = LoraConfig( r=args.lora_rank, lora_alpha=args.lora_alpha, target_modules=["to_k", "to_q", "to_v", "key", "query", "value"], lora_dropout=args.lora_dropout, bias=args.lora_bias, ) unet = get_peft_model(unet, config) config = LoraConfig( r=args.lora_rank, lora_alpha=args.lora_alpha, target_modules=["k_proj", "q_proj", "v_proj"], lora_dropout=args.lora_dropout, bias=args.lora_bias, ) text_encoder = get_peft_model(text_encoder, config) vae.requires_grad_(False) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() text_encoder.gradient_checkpointing_enable() # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: for model in models: sub_dir = ( "unet" if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model)) else "text_encoder" ) model.save_pretrained(os.path.join(output_dir, sub_dir)) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: # pop models so that they are not loaded again model = models.pop() sub_dir = ( "unet" if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model)) else "text_encoder" ) model_cls = ( UNet2DConditionModel if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model)) else CLIPTextModel ) load_model = model_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder=sub_dir) load_model = PeftModel.from_pretrained(load_model, input_dir, subfolder=sub_dir) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.unet_learning_rate = ( args.unet_learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) args.text_encoder_learning_rate = ( args.text_encoder_learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation optimizer = optimizer_class( [ {"params": unet.parameters(), "lr": args.unet_learning_rate}, {"params": text_encoder.parameters(), "lr": args.text_encoder_learning_rate}, ], betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = RealFillDataset( train_data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, num_workers=1, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
unet, text_encoder, optimizer, train_dataloader = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader ) # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = vars(copy.deepcopy(args)) accelerator.init_trackers("realfill", config=tracker_config) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): unet.train() text_encoder.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet, text_encoder): # Convert images to latent space latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Convert masked images to latent space conditionings = vae.encode(batch["conditioning_images"].to(dtype=weight_dtype)).latent_dist.sample() conditionings = conditionings * 0.18215 # Downsample mask and weighting so that they match with the latents masks, size = batch["masks"].to(dtype=weight_dtype), latents.shape[2:] masks = F.interpolate(masks, size=size) weightings = batch["weightings"].to(dtype=weight_dtype) weightings = F.interpolate(weightings, size=size) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Concatenate noisy latents, masks and conditionings to get inputs to unet inputs = torch.cat([noisy_latents, masks, conditionings], dim=1) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(inputs, timesteps, encoder_hidden_states).sample # Compute the diffusion loss assert noise_scheduler.config.prediction_type == "epsilon" loss = (weightings * F.mse_loss(model_pred.float(), noise.float(), reduction="none")).mean() # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = itertools.chain(unet.parameters(), text_encoder.parameters()) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step % 
args.validation_steps == 0: log_validation( text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) logs = {"loss": loss.detach().item()} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Save the lora layers accelerator.wait_for_everyone() if accelerator.is_main_process: pipeline = StableDiffusionInpaintPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet, keep_fp32_wrapper=True).merge_and_unload(), text_encoder=accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True).merge_and_unload(), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) # Final inference images = log_validation( text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) if args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
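# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): one possible launch
# command. The data layout matches what `RealFillDataset` above expects, i.e.
# reference images under `<train_data_dir>/ref/` and the target image/mask at
# `<train_data_dir>/target/target.png` and `<train_data_dir>/target/mask.png`.
# The base model name and hyperparameter values are illustrative assumptions.
#
#   accelerate launch train_realfill.py \
#     --pretrained_model_name_or_path="stabilityai/stable-diffusion-2-inpainting" \
#     --train_data_dir="./data/my-scene" \
#     --output_dir="./realfill-model" \
#     --resolution=512 \
#     --train_batch_size=16 \
#     --gradient_accumulation_steps=1 \
#     --unet_learning_rate=2e-4 \
#     --text_encoder_learning_rate=4e-5 \
#     --lr_scheduler="constant" \
#     --lr_warmup_steps=100 \
#     --max_train_steps=2000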
diffusers/examples/research_projects/realfill/train_realfill.py/0
{ "file_path": "diffusers/examples/research_projects/realfill/train_realfill.py", "repo_id": "diffusers", "token_count": 16396 }
115
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors from diffusers import DiffusionPipeline # noqa: E402 sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextToImageLoRA(ExamplesTestsAccelerate): def test_text_to_image_lora_sdxl_checkpointing_checkpoints_total_limit(self): prompt = "a prompt" pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in 
os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # resume and we should try to checkpoint at 6, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image_lora.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 8 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --seed=0 --num_validation_images=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, ) class TextToImageLoRASDXL(ExamplesTestsAccelerate): def test_text_to_image_lora_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. 
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) def test_text_to_image_lora_sdxl_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names. keys = lora_state_dict.keys() starts_with_unet = all( k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys ) self.assertTrue(starts_with_unet) def test_text_to_image_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self): prompt = "a prompt" pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --train_text_encoder --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
diffusers/examples/text_to_image/test_text_to_image_lora.py/0
{ "file_path": "diffusers/examples/text_to_image/test_text_to_image_lora.py", "repo_id": "diffusers", "token_count": 6179 }
116
import argparse import time from pathlib import Path from typing import Any, Dict, Literal import torch from diffusers import AsymmetricAutoencoderKL ASYMMETRIC_AUTOENCODER_KL_x_1_5_CONFIG = { "in_channels": 3, "out_channels": 3, "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], "down_block_out_channels": [128, 256, 512, 512], "layers_per_down_block": 2, "up_block_types": [ "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", ], "up_block_out_channels": [192, 384, 768, 768], "layers_per_up_block": 3, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": 32, "sample_size": 256, "scaling_factor": 0.18215, } ASYMMETRIC_AUTOENCODER_KL_x_2_CONFIG = { "in_channels": 3, "out_channels": 3, "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], "down_block_out_channels": [128, 256, 512, 512], "layers_per_down_block": 2, "up_block_types": [ "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", ], "up_block_out_channels": [256, 512, 1024, 1024], "layers_per_up_block": 5, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": 32, "sample_size": 256, "scaling_factor": 0.18215, } def convert_asymmetric_autoencoder_kl_state_dict(original_state_dict: Dict[str, Any]) -> Dict[str, Any]: converted_state_dict = {} for k, v in original_state_dict.items(): if k.startswith("encoder."): converted_state_dict[ k.replace("encoder.down.", "encoder.down_blocks.") .replace("encoder.mid.", "encoder.mid_block.") .replace("encoder.norm_out.", "encoder.conv_norm_out.") .replace(".downsample.", ".downsamplers.0.") .replace(".nin_shortcut.", ".conv_shortcut.") .replace(".block.", ".resnets.") .replace(".block_1.", ".resnets.0.") .replace(".block_2.", ".resnets.1.") .replace(".attn_1.k.", ".attentions.0.to_k.") .replace(".attn_1.q.", ".attentions.0.to_q.") .replace(".attn_1.v.", ".attentions.0.to_v.") .replace(".attn_1.proj_out.", ".attentions.0.to_out.0.") .replace(".attn_1.norm.", ".attentions.0.group_norm.") ] = v elif k.startswith("decoder.") and "up_layers" not in k: converted_state_dict[ k.replace("decoder.encoder.", "decoder.condition_encoder.") .replace(".norm_out.", ".conv_norm_out.") .replace(".up.0.", ".up_blocks.3.") .replace(".up.1.", ".up_blocks.2.") .replace(".up.2.", ".up_blocks.1.") .replace(".up.3.", ".up_blocks.0.") .replace(".block.", ".resnets.") .replace("mid", "mid_block") .replace(".0.upsample.", ".0.upsamplers.0.") .replace(".1.upsample.", ".1.upsamplers.0.") .replace(".2.upsample.", ".2.upsamplers.0.") .replace(".nin_shortcut.", ".conv_shortcut.") .replace(".block_1.", ".resnets.0.") .replace(".block_2.", ".resnets.1.") .replace(".attn_1.k.", ".attentions.0.to_k.") .replace(".attn_1.q.", ".attentions.0.to_q.") .replace(".attn_1.v.", ".attentions.0.to_v.") .replace(".attn_1.proj_out.", ".attentions.0.to_out.0.") .replace(".attn_1.norm.", ".attentions.0.group_norm.") ] = v elif k.startswith("quant_conv."): converted_state_dict[k] = v elif k.startswith("post_quant_conv."): converted_state_dict[k] = v else: print(f" skipping key `{k}`") # fix weights shape for k, v in converted_state_dict.items(): if ( (k.startswith("encoder.mid_block.attentions.0") or k.startswith("decoder.mid_block.attentions.0")) and k.endswith("weight") and ("to_q" in k or "to_k" in k or "to_v" in k or "to_out" in k) ): converted_state_dict[k] = converted_state_dict[k][:, :, 0, 0] return converted_state_dict def 
get_asymmetric_autoencoder_kl_from_original_checkpoint( scale: Literal["1.5", "2"], original_checkpoint_path: str, map_location: torch.device ) -> AsymmetricAutoencoderKL: print("Loading original state_dict") original_state_dict = torch.load(original_checkpoint_path, map_location=map_location) original_state_dict = original_state_dict["state_dict"] print("Converting state_dict") converted_state_dict = convert_asymmetric_autoencoder_kl_state_dict(original_state_dict) kwargs = ASYMMETRIC_AUTOENCODER_KL_x_1_5_CONFIG if scale == "1.5" else ASYMMETRIC_AUTOENCODER_KL_x_2_CONFIG print("Initializing AsymmetricAutoencoderKL model") asymmetric_autoencoder_kl = AsymmetricAutoencoderKL(**kwargs) print("Loading weight from converted state_dict") asymmetric_autoencoder_kl.load_state_dict(converted_state_dict) asymmetric_autoencoder_kl.eval() print("AsymmetricAutoencoderKL successfully initialized") return asymmetric_autoencoder_kl if __name__ == "__main__": start = time.time() parser = argparse.ArgumentParser() parser.add_argument( "--scale", default=None, type=str, required=True, help="Asymmetric VQGAN scale: `1.5` or `2`", ) parser.add_argument( "--original_checkpoint_path", default=None, type=str, required=True, help="Path to the original Asymmetric VQGAN checkpoint", ) parser.add_argument( "--output_path", default=None, type=str, required=True, help="Path to save pretrained AsymmetricAutoencoderKL model", ) parser.add_argument( "--map_location", default="cpu", type=str, required=False, help="The device passed to `map_location` when loading the checkpoint", ) args = parser.parse_args() assert args.scale in ["1.5", "2"], f"{args.scale} should be `1.5` of `2`" assert Path(args.original_checkpoint_path).is_file() asymmetric_autoencoder_kl = get_asymmetric_autoencoder_kl_from_original_checkpoint( scale=args.scale, original_checkpoint_path=args.original_checkpoint_path, map_location=torch.device(args.map_location), ) print("Saving pretrained AsymmetricAutoencoderKL") asymmetric_autoencoder_kl.save_pretrained(args.output_path) print(f"Done in {time.time() - start:.2f} seconds")
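# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). The checkpoint path is
# a placeholder assumption; the flags correspond to the argparse definitions
# above.
#
#   python scripts/convert_asymmetric_vqgan_to_diffusers.py \
#     --scale 1.5 \
#     --original_checkpoint_path /path/to/asymmetric_vqgan_x1.5.ckpt \
#     --output_path ./asymmetric-autoencoder-kl-x-1-5 \
#     --map_location cpu
#
# The converted weights can then be reloaded with
# `AsymmetricAutoencoderKL.from_pretrained("./asymmetric-autoencoder-kl-x-1-5")`.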
diffusers/scripts/convert_asymmetric_vqgan_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_asymmetric_vqgan_to_diffusers.py", "repo_id": "diffusers", "token_count": 3351 }
117
import argparse import os import tempfile import torch from accelerate import load_checkpoint_and_dispatch from diffusers import UNet2DConditionModel from diffusers.models.transformers.prior_transformer import PriorTransformer from diffusers.models.vq_model import VQModel """ Example - From the diffusers root directory: Download weights: ```sh $ wget https://huggingface.co/ai-forever/Kandinsky_2.1/blob/main/prior_fp16.ckpt ``` Convert the model: ```sh python scripts/convert_kandinsky_to_diffusers.py \ --prior_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/prior_fp16.ckpt \ --clip_stat_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/ViT-L-14_stats.th \ --text2img_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/decoder_fp16.ckpt \ --inpaint_text2img_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/inpainting_fp16.ckpt \ --movq_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/movq_final.ckpt \ --dump_path /home/yiyi_huggingface_co/dump \ --debug decoder ``` """ # prior PRIOR_ORIGINAL_PREFIX = "model" # Uses default arguments PRIOR_CONFIG = {} def prior_model_from_original_config(): model = PriorTransformer(**PRIOR_CONFIG) return model def prior_original_checkpoint_to_diffusers_checkpoint(model, checkpoint, clip_stats_checkpoint): diffusers_checkpoint = {} # <original>.time_embed.0 -> <diffusers>.time_embedding.linear_1 diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.weight"], "time_embedding.linear_1.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.bias"], } ) # <original>.clip_img_proj -> <diffusers>.proj_in diffusers_checkpoint.update( { "proj_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.weight"], "proj_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.bias"], } ) # <original>.text_emb_proj -> <diffusers>.embedding_proj diffusers_checkpoint.update( { "embedding_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.weight"], "embedding_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.bias"], } ) # <original>.text_enc_proj -> <diffusers>.encoder_hidden_states_proj diffusers_checkpoint.update( { "encoder_hidden_states_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.weight"], "encoder_hidden_states_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.bias"], } ) # <original>.positional_embedding -> <diffusers>.positional_embedding diffusers_checkpoint.update({"positional_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.positional_embedding"]}) # <original>.prd_emb -> <diffusers>.prd_embedding diffusers_checkpoint.update({"prd_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.prd_emb"]}) # <original>.time_embed.2 -> <diffusers>.time_embedding.linear_2 diffusers_checkpoint.update( { "time_embedding.linear_2.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.weight"], "time_embedding.linear_2.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.bias"], } ) # <original>.resblocks.<x> -> <diffusers>.transformer_blocks.<x> for idx in range(len(model.transformer_blocks)): diffusers_transformer_prefix = f"transformer_blocks.{idx}" original_transformer_prefix = f"{PRIOR_ORIGINAL_PREFIX}.transformer.resblocks.{idx}" # <original>.attn -> <diffusers>.attn1 diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1" original_attention_prefix = f"{original_transformer_prefix}.attn" 
diffusers_checkpoint.update( prior_attention_to_diffusers( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, original_attention_prefix=original_attention_prefix, attention_head_dim=model.attention_head_dim, ) ) # <original>.mlp -> <diffusers>.ff diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff" original_ff_prefix = f"{original_transformer_prefix}.mlp" diffusers_checkpoint.update( prior_ff_to_diffusers( checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix ) ) # <original>.ln_1 -> <diffusers>.norm1 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[ f"{original_transformer_prefix}.ln_1.weight" ], f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"], } ) # <original>.ln_2 -> <diffusers>.norm3 diffusers_checkpoint.update( { f"{diffusers_transformer_prefix}.norm3.weight": checkpoint[ f"{original_transformer_prefix}.ln_2.weight" ], f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"], } ) # <original>.final_ln -> <diffusers>.norm_out diffusers_checkpoint.update( { "norm_out.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.weight"], "norm_out.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.bias"], } ) # <original>.out_proj -> <diffusers>.proj_to_clip_embeddings diffusers_checkpoint.update( { "proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.weight"], "proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.bias"], } ) # clip stats clip_mean, clip_std = clip_stats_checkpoint clip_mean = clip_mean[None, :] clip_std = clip_std[None, :] diffusers_checkpoint.update({"clip_mean": clip_mean, "clip_std": clip_std}) return diffusers_checkpoint def prior_attention_to_diffusers( checkpoint, *, diffusers_attention_prefix, original_attention_prefix, attention_head_dim ): diffusers_checkpoint = {} # <original>.c_qkv -> <diffusers>.{to_q, to_k, to_v} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{original_attention_prefix}.c_qkv.weight"], bias=checkpoint[f"{original_attention_prefix}.c_qkv.bias"], split=3, chunk_size=attention_head_dim, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.c_proj -> <diffusers>.to_out.0 diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{original_attention_prefix}.c_proj.weight"], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{original_attention_prefix}.c_proj.bias"], } ) return diffusers_checkpoint def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix): diffusers_checkpoint = { # <original>.c_fc -> <diffusers>.net.0.proj f"{diffusers_ff_prefix}.net.{0}.proj.weight": checkpoint[f"{original_ff_prefix}.c_fc.weight"], f"{diffusers_ff_prefix}.net.{0}.proj.bias": checkpoint[f"{original_ff_prefix}.c_fc.bias"], # <original>.c_proj -> <diffusers>.net.2 f"{diffusers_ff_prefix}.net.{2}.weight": checkpoint[f"{original_ff_prefix}.c_proj.weight"], f"{diffusers_ff_prefix}.net.{2}.bias": checkpoint[f"{original_ff_prefix}.c_proj.bias"], } return diffusers_checkpoint # done prior # unet # We are 
hardcoding the model configuration for now. If we need to generalize to more model configurations, we can # update then. UNET_CONFIG = { "act_fn": "silu", "addition_embed_type": "text_image", "addition_embed_type_num_heads": 64, "attention_head_dim": 64, "block_out_channels": [384, 768, 1152, 1536], "center_input_sample": False, "class_embed_type": None, "class_embeddings_concat": False, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 768, "cross_attention_norm": None, "down_block_types": [ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", ], "downsample_padding": 1, "dual_cross_attention": False, "encoder_hid_dim": 1024, "encoder_hid_dim_type": "text_image_proj", "flip_sin_to_cos": True, "freq_shift": 0, "in_channels": 4, "layers_per_block": 3, "mid_block_only_cross_attention": None, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "norm_eps": 1e-05, "norm_num_groups": 32, "num_class_embeds": None, "only_cross_attention": False, "out_channels": 8, "projection_class_embeddings_input_dim": None, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": False, "resnet_time_scale_shift": "scale_shift", "sample_size": 64, "time_cond_proj_dim": None, "time_embedding_act_fn": None, "time_embedding_dim": None, "time_embedding_type": "positional", "timestep_post_act": None, "up_block_types": [ "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D", ], "upcast_attention": False, "use_linear_projection": False, } def unet_model_from_original_config(): model = UNet2DConditionModel(**UNET_CONFIG) return model def unet_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} num_head_channels = UNET_CONFIG["attention_head_dim"] diffusers_checkpoint.update(unet_time_embeddings(checkpoint)) diffusers_checkpoint.update(unet_conv_in(checkpoint)) diffusers_checkpoint.update(unet_add_embedding(checkpoint)) diffusers_checkpoint.update(unet_encoder_hid_proj(checkpoint)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, num_head_channels=num_head_channels, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.input_blocks -> <diffusers>.down_blocks diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, num_head_channels=num_head_channels, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, num_head_channels=num_head_channels, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint)) diffusers_checkpoint.update(unet_conv_out(checkpoint)) return diffusers_checkpoint # done unet # inpaint unet # We are hardcoding the model configuration for now. 
If we need to generalize to more model configurations, we can # update then. INPAINT_UNET_CONFIG = { "act_fn": "silu", "addition_embed_type": "text_image", "addition_embed_type_num_heads": 64, "attention_head_dim": 64, "block_out_channels": [384, 768, 1152, 1536], "center_input_sample": False, "class_embed_type": None, "class_embeddings_concat": None, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 768, "cross_attention_norm": None, "down_block_types": [ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", "SimpleCrossAttnDownBlock2D", ], "downsample_padding": 1, "dual_cross_attention": False, "encoder_hid_dim": 1024, "encoder_hid_dim_type": "text_image_proj", "flip_sin_to_cos": True, "freq_shift": 0, "in_channels": 9, "layers_per_block": 3, "mid_block_only_cross_attention": None, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "norm_eps": 1e-05, "norm_num_groups": 32, "num_class_embeds": None, "only_cross_attention": False, "out_channels": 8, "projection_class_embeddings_input_dim": None, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": False, "resnet_time_scale_shift": "scale_shift", "sample_size": 64, "time_cond_proj_dim": None, "time_embedding_act_fn": None, "time_embedding_dim": None, "time_embedding_type": "positional", "timestep_post_act": None, "up_block_types": [ "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D", ], "upcast_attention": False, "use_linear_projection": False, } def inpaint_unet_model_from_original_config(): model = UNet2DConditionModel(**INPAINT_UNET_CONFIG) return model def inpaint_unet_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} num_head_channels = INPAINT_UNET_CONFIG["attention_head_dim"] diffusers_checkpoint.update(unet_time_embeddings(checkpoint)) diffusers_checkpoint.update(unet_conv_in(checkpoint)) diffusers_checkpoint.update(unet_add_embedding(checkpoint)) diffusers_checkpoint.update(unet_encoder_hid_proj(checkpoint)) # <original>.input_blocks -> <diffusers>.down_blocks original_down_block_idx = 1 for diffusers_down_block_idx in range(len(model.down_blocks)): checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint( model, checkpoint, diffusers_down_block_idx=diffusers_down_block_idx, original_down_block_idx=original_down_block_idx, num_head_channels=num_head_channels, ) original_down_block_idx += num_original_down_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.input_blocks -> <diffusers>.down_blocks diffusers_checkpoint.update( unet_midblock_to_diffusers_checkpoint( model, checkpoint, num_head_channels=num_head_channels, ) ) # <original>.output_blocks -> <diffusers>.up_blocks original_up_block_idx = 0 for diffusers_up_block_idx in range(len(model.up_blocks)): checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint( model, checkpoint, diffusers_up_block_idx=diffusers_up_block_idx, original_up_block_idx=original_up_block_idx, num_head_channels=num_head_channels, ) original_up_block_idx += num_original_up_blocks diffusers_checkpoint.update(checkpoint_update) # done <original>.output_blocks -> <diffusers>.up_blocks diffusers_checkpoint.update(unet_conv_norm_out(checkpoint)) diffusers_checkpoint.update(unet_conv_out(checkpoint)) return diffusers_checkpoint # done inpaint unet # unet utils # <original>.time_embed -> <diffusers>.time_embedding def unet_time_embeddings(checkpoint): 
diffusers_checkpoint = {} diffusers_checkpoint.update( { "time_embedding.linear_1.weight": checkpoint["time_embed.0.weight"], "time_embedding.linear_1.bias": checkpoint["time_embed.0.bias"], "time_embedding.linear_2.weight": checkpoint["time_embed.2.weight"], "time_embedding.linear_2.bias": checkpoint["time_embed.2.bias"], } ) return diffusers_checkpoint # <original>.input_blocks.0 -> <diffusers>.conv_in def unet_conv_in(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_in.weight": checkpoint["input_blocks.0.0.weight"], "conv_in.bias": checkpoint["input_blocks.0.0.bias"], } ) return diffusers_checkpoint def unet_add_embedding(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "add_embedding.text_norm.weight": checkpoint["ln_model_n.weight"], "add_embedding.text_norm.bias": checkpoint["ln_model_n.bias"], "add_embedding.text_proj.weight": checkpoint["proj_n.weight"], "add_embedding.text_proj.bias": checkpoint["proj_n.bias"], "add_embedding.image_proj.weight": checkpoint["img_layer.weight"], "add_embedding.image_proj.bias": checkpoint["img_layer.bias"], } ) return diffusers_checkpoint def unet_encoder_hid_proj(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "encoder_hid_proj.image_embeds.weight": checkpoint["clip_to_seq.weight"], "encoder_hid_proj.image_embeds.bias": checkpoint["clip_to_seq.bias"], "encoder_hid_proj.text_proj.weight": checkpoint["to_model_dim_n.weight"], "encoder_hid_proj.text_proj.bias": checkpoint["to_model_dim_n.bias"], } ) return diffusers_checkpoint # <original>.out.0 -> <diffusers>.conv_norm_out def unet_conv_norm_out(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_norm_out.weight": checkpoint["out.0.weight"], "conv_norm_out.bias": checkpoint["out.0.bias"], } ) return diffusers_checkpoint # <original>.out.2 -> <diffusers>.conv_out def unet_conv_out(checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update( { "conv_out.weight": checkpoint["out.2.weight"], "conv_out.bias": checkpoint["out.2.bias"], } ) return diffusers_checkpoint # <original>.input_blocks -> <diffusers>.down_blocks def unet_downblock_to_diffusers_checkpoint( model, checkpoint, *, diffusers_down_block_idx, original_down_block_idx, num_head_channels ): diffusers_checkpoint = {} diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.resnets" original_down_block_prefix = "input_blocks" down_block = model.down_blocks[diffusers_down_block_idx] num_resnets = len(down_block.resnets) if down_block.downsamplers is None: downsampler = False else: assert len(down_block.downsamplers) == 1 downsampler = True # The downsample block is also a resnet num_resnets += 1 for resnet_idx_inc in range(num_resnets): full_resnet_prefix = f"{original_down_block_prefix}.{original_down_block_idx + resnet_idx_inc}.0" if downsampler and resnet_idx_inc == num_resnets - 1: # this is a downsample block full_diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.downsamplers.0" else: # this is a regular resnet block full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix ) ) if hasattr(down_block, "attentions"): num_attentions = len(down_block.attentions) diffusers_attention_prefix = f"down_blocks.{diffusers_down_block_idx}.attentions" for attention_idx_inc in range(num_attentions): full_attention_prefix = 
f"{original_down_block_prefix}.{original_down_block_idx + attention_idx_inc}.1" full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}" diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, attention_prefix=full_attention_prefix, diffusers_attention_prefix=full_diffusers_attention_prefix, num_head_channels=num_head_channels, ) ) num_original_down_blocks = num_resnets return diffusers_checkpoint, num_original_down_blocks # <original>.middle_block -> <diffusers>.mid_block def unet_midblock_to_diffusers_checkpoint(model, checkpoint, *, num_head_channels): diffusers_checkpoint = {} # block 0 original_block_idx = 0 diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, diffusers_resnet_prefix="mid_block.resnets.0", resnet_prefix=f"middle_block.{original_block_idx}", ) ) original_block_idx += 1 # optional block 1 if hasattr(model.mid_block, "attentions") and model.mid_block.attentions[0] is not None: diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix="mid_block.attentions.0", attention_prefix=f"middle_block.{original_block_idx}", num_head_channels=num_head_channels, ) ) original_block_idx += 1 # block 1 or block 2 diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, diffusers_resnet_prefix="mid_block.resnets.1", resnet_prefix=f"middle_block.{original_block_idx}", ) ) return diffusers_checkpoint # <original>.output_blocks -> <diffusers>.up_blocks def unet_upblock_to_diffusers_checkpoint( model, checkpoint, *, diffusers_up_block_idx, original_up_block_idx, num_head_channels ): diffusers_checkpoint = {} diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.resnets" original_up_block_prefix = "output_blocks" up_block = model.up_blocks[diffusers_up_block_idx] num_resnets = len(up_block.resnets) if up_block.upsamplers is None: upsampler = False else: assert len(up_block.upsamplers) == 1 upsampler = True # The upsample block is also a resnet num_resnets += 1 has_attentions = hasattr(up_block, "attentions") for resnet_idx_inc in range(num_resnets): if upsampler and resnet_idx_inc == num_resnets - 1: # this is an upsample block if has_attentions: # There is a middle attention block that we skip original_resnet_block_idx = 2 else: original_resnet_block_idx = 1 # we add the `minus 1` because the last two resnets are stuck together in the same output block full_resnet_prefix = ( f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc - 1}.{original_resnet_block_idx}" ) full_diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.upsamplers.0" else: # this is a regular resnet block full_resnet_prefix = f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc}.0" full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix ) ) if has_attentions: num_attentions = len(up_block.attentions) diffusers_attention_prefix = f"up_blocks.{diffusers_up_block_idx}.attentions" for attention_idx_inc in range(num_attentions): full_attention_prefix = f"{original_up_block_prefix}.{original_up_block_idx + attention_idx_inc}.1" full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}" diffusers_checkpoint.update( attention_to_diffusers_checkpoint( checkpoint, attention_prefix=full_attention_prefix, 
diffusers_attention_prefix=full_diffusers_attention_prefix, num_head_channels=num_head_channels, ) ) num_original_down_blocks = num_resnets - 1 if upsampler else num_resnets return diffusers_checkpoint, num_original_down_blocks def resnet_to_diffusers_checkpoint(checkpoint, *, diffusers_resnet_prefix, resnet_prefix): diffusers_checkpoint = { f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.in_layers.0.weight"], f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.in_layers.0.bias"], f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.in_layers.2.weight"], f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.in_layers.2.bias"], f"{diffusers_resnet_prefix}.time_emb_proj.weight": checkpoint[f"{resnet_prefix}.emb_layers.1.weight"], f"{diffusers_resnet_prefix}.time_emb_proj.bias": checkpoint[f"{resnet_prefix}.emb_layers.1.bias"], f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.out_layers.0.weight"], f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.out_layers.0.bias"], f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.out_layers.3.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.out_layers.3.bias"], } skip_connection_prefix = f"{resnet_prefix}.skip_connection" if f"{skip_connection_prefix}.weight" in checkpoint: diffusers_checkpoint.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{skip_connection_prefix}.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{skip_connection_prefix}.bias"], } ) return diffusers_checkpoint def attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix, num_head_channels): diffusers_checkpoint = {} # <original>.norm -> <diffusers>.group_norm diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"], f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"], } ) # <original>.qkv -> <diffusers>.{query, key, value} [q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions( weight=checkpoint[f"{attention_prefix}.qkv.weight"][:, :, 0], bias=checkpoint[f"{attention_prefix}.qkv.bias"], split=3, chunk_size=num_head_channels, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_q.weight": q_weight, f"{diffusers_attention_prefix}.to_q.bias": q_bias, f"{diffusers_attention_prefix}.to_k.weight": k_weight, f"{diffusers_attention_prefix}.to_k.bias": k_bias, f"{diffusers_attention_prefix}.to_v.weight": v_weight, f"{diffusers_attention_prefix}.to_v.bias": v_bias, } ) # <original>.encoder_kv -> <diffusers>.{context_key, context_value} [encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions( weight=checkpoint[f"{attention_prefix}.encoder_kv.weight"][:, :, 0], bias=checkpoint[f"{attention_prefix}.encoder_kv.bias"], split=2, chunk_size=num_head_channels, ) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.add_k_proj.weight": encoder_k_weight, f"{diffusers_attention_prefix}.add_k_proj.bias": encoder_k_bias, f"{diffusers_attention_prefix}.add_v_proj.weight": encoder_v_weight, f"{diffusers_attention_prefix}.add_v_proj.bias": encoder_v_bias, } ) # <original>.proj_out (1d conv) -> <diffusers>.proj_attn (linear) diffusers_checkpoint.update( { f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][ :, :, 0 ], 
f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } ) return diffusers_checkpoint # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?) def split_attentions(*, weight, bias, split, chunk_size): weights = [None] * split biases = [None] * split weights_biases_idx = 0 for starting_row_index in range(0, weight.shape[0], chunk_size): row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size) weight_rows = weight[row_indices, :] bias_rows = bias[row_indices] if weights[weights_biases_idx] is None: assert weights[weights_biases_idx] is None weights[weights_biases_idx] = weight_rows biases[weights_biases_idx] = bias_rows else: assert weights[weights_biases_idx] is not None weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows]) biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows]) weights_biases_idx = (weights_biases_idx + 1) % split return weights, biases # done unet utils def prior(*, args, checkpoint_map_location): print("loading prior") prior_checkpoint = torch.load(args.prior_checkpoint_path, map_location=checkpoint_map_location) clip_stats_checkpoint = torch.load(args.clip_stat_path, map_location=checkpoint_map_location) prior_model = prior_model_from_original_config() prior_diffusers_checkpoint = prior_original_checkpoint_to_diffusers_checkpoint( prior_model, prior_checkpoint, clip_stats_checkpoint ) del prior_checkpoint del clip_stats_checkpoint load_checkpoint_to_model(prior_diffusers_checkpoint, prior_model, strict=True) print("done loading prior") return prior_model def text2img(*, args, checkpoint_map_location): print("loading text2img") text2img_checkpoint = torch.load(args.text2img_checkpoint_path, map_location=checkpoint_map_location) unet_model = unet_model_from_original_config() unet_diffusers_checkpoint = unet_original_checkpoint_to_diffusers_checkpoint(unet_model, text2img_checkpoint) del text2img_checkpoint load_checkpoint_to_model(unet_diffusers_checkpoint, unet_model, strict=True) print("done loading text2img") return unet_model def inpaint_text2img(*, args, checkpoint_map_location): print("loading inpaint text2img") inpaint_text2img_checkpoint = torch.load( args.inpaint_text2img_checkpoint_path, map_location=checkpoint_map_location ) inpaint_unet_model = inpaint_unet_model_from_original_config() inpaint_unet_diffusers_checkpoint = inpaint_unet_original_checkpoint_to_diffusers_checkpoint( inpaint_unet_model, inpaint_text2img_checkpoint ) del inpaint_text2img_checkpoint load_checkpoint_to_model(inpaint_unet_diffusers_checkpoint, inpaint_unet_model, strict=True) print("done loading inpaint text2img") return inpaint_unet_model # movq MOVQ_CONFIG = { "in_channels": 3, "out_channels": 3, "latent_channels": 4, "down_block_types": ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D"), "up_block_types": ("AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"), "num_vq_embeddings": 16384, "block_out_channels": (128, 256, 256, 512), "vq_embed_dim": 4, "layers_per_block": 2, "norm_type": "spatial", } def movq_model_from_original_config(): movq = VQModel(**MOVQ_CONFIG) return movq def movq_encoder_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # conv_in diffusers_checkpoint.update( { "encoder.conv_in.weight": checkpoint["encoder.conv_in.weight"], "encoder.conv_in.bias": checkpoint["encoder.conv_in.bias"], } ) # 
down_blocks for down_block_idx, down_block in enumerate(model.encoder.down_blocks): diffusers_down_block_prefix = f"encoder.down_blocks.{down_block_idx}" down_block_prefix = f"encoder.down.{down_block_idx}" # resnets for resnet_idx, resnet in enumerate(down_block.resnets): diffusers_resnet_prefix = f"{diffusers_down_block_prefix}.resnets.{resnet_idx}" resnet_prefix = f"{down_block_prefix}.block.{resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) # downsample # do not include the downsample when on the last down block # There is no downsample on the last down block if down_block_idx != len(model.encoder.down_blocks) - 1: # There's a single downsample in the original checkpoint but a list of downsamples # in the diffusers model. diffusers_downsample_prefix = f"{diffusers_down_block_prefix}.downsamplers.0.conv" downsample_prefix = f"{down_block_prefix}.downsample.conv" diffusers_checkpoint.update( { f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"], f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"], } ) # attentions if hasattr(down_block, "attentions"): for attention_idx, _ in enumerate(down_block.attentions): diffusers_attention_prefix = f"{diffusers_down_block_prefix}.attentions.{attention_idx}" attention_prefix = f"{down_block_prefix}.attn.{attention_idx}" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix, ) ) # mid block # mid block attentions # There is a single hardcoded attention block in the middle of the VQ-diffusion encoder diffusers_attention_prefix = "encoder.mid_block.attentions.0" attention_prefix = "encoder.mid.attn_1" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix ) ) # mid block resnets for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets): diffusers_resnet_prefix = f"encoder.mid_block.resnets.{diffusers_resnet_idx}" # the hardcoded prefixes to `block_` are 1 and 2 orig_resnet_idx = diffusers_resnet_idx + 1 # There are two hardcoded resnets in the middle of the VQ-diffusion encoder resnet_prefix = f"encoder.mid.block_{orig_resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) diffusers_checkpoint.update( { # conv_norm_out "encoder.conv_norm_out.weight": checkpoint["encoder.norm_out.weight"], "encoder.conv_norm_out.bias": checkpoint["encoder.norm_out.bias"], # conv_out "encoder.conv_out.weight": checkpoint["encoder.conv_out.weight"], "encoder.conv_out.bias": checkpoint["encoder.conv_out.bias"], } ) return diffusers_checkpoint def movq_decoder_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # conv in diffusers_checkpoint.update( { "decoder.conv_in.weight": checkpoint["decoder.conv_in.weight"], "decoder.conv_in.bias": checkpoint["decoder.conv_in.bias"], } ) # up_blocks for diffusers_up_block_idx, up_block in enumerate(model.decoder.up_blocks): # up_blocks are stored in reverse order in the VQ-diffusion checkpoint orig_up_block_idx = len(model.decoder.up_blocks) - 1 - diffusers_up_block_idx diffusers_up_block_prefix = f"decoder.up_blocks.{diffusers_up_block_idx}" up_block_prefix = 
f"decoder.up.{orig_up_block_idx}" # resnets for resnet_idx, resnet in enumerate(up_block.resnets): diffusers_resnet_prefix = f"{diffusers_up_block_prefix}.resnets.{resnet_idx}" resnet_prefix = f"{up_block_prefix}.block.{resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint_spatial_norm( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) # upsample # there is no up sample on the last up block if diffusers_up_block_idx != len(model.decoder.up_blocks) - 1: # There's a single upsample in the VQ-diffusion checkpoint but a list of downsamples # in the diffusers model. diffusers_downsample_prefix = f"{diffusers_up_block_prefix}.upsamplers.0.conv" downsample_prefix = f"{up_block_prefix}.upsample.conv" diffusers_checkpoint.update( { f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"], f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"], } ) # attentions if hasattr(up_block, "attentions"): for attention_idx, _ in enumerate(up_block.attentions): diffusers_attention_prefix = f"{diffusers_up_block_prefix}.attentions.{attention_idx}" attention_prefix = f"{up_block_prefix}.attn.{attention_idx}" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint_spatial_norm( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix, ) ) # mid block # mid block attentions # There is a single hardcoded attention block in the middle of the VQ-diffusion decoder diffusers_attention_prefix = "decoder.mid_block.attentions.0" attention_prefix = "decoder.mid.attn_1" diffusers_checkpoint.update( movq_attention_to_diffusers_checkpoint_spatial_norm( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix ) ) # mid block resnets for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets): diffusers_resnet_prefix = f"decoder.mid_block.resnets.{diffusers_resnet_idx}" # the hardcoded prefixes to `block_` are 1 and 2 orig_resnet_idx = diffusers_resnet_idx + 1 # There are two hardcoded resnets in the middle of the VQ-diffusion decoder resnet_prefix = f"decoder.mid.block_{orig_resnet_idx}" diffusers_checkpoint.update( movq_resnet_to_diffusers_checkpoint_spatial_norm( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) diffusers_checkpoint.update( { # conv_norm_out "decoder.conv_norm_out.norm_layer.weight": checkpoint["decoder.norm_out.norm_layer.weight"], "decoder.conv_norm_out.norm_layer.bias": checkpoint["decoder.norm_out.norm_layer.bias"], "decoder.conv_norm_out.conv_y.weight": checkpoint["decoder.norm_out.conv_y.weight"], "decoder.conv_norm_out.conv_y.bias": checkpoint["decoder.norm_out.conv_y.bias"], "decoder.conv_norm_out.conv_b.weight": checkpoint["decoder.norm_out.conv_b.weight"], "decoder.conv_norm_out.conv_b.bias": checkpoint["decoder.norm_out.conv_b.bias"], # conv_out "decoder.conv_out.weight": checkpoint["decoder.conv_out.weight"], "decoder.conv_out.bias": checkpoint["decoder.conv_out.bias"], } ) return diffusers_checkpoint def movq_resnet_to_diffusers_checkpoint(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix): rv = { # norm1 f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.norm1.weight"], f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.norm1.bias"], # conv1 f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"], 
f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"], # norm2 f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.norm2.weight"], f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.norm2.bias"], # conv2 f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"], } if resnet.conv_shortcut is not None: rv.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"], } ) return rv def movq_resnet_to_diffusers_checkpoint_spatial_norm(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix): rv = { # norm1 f"{diffusers_resnet_prefix}.norm1.norm_layer.weight": checkpoint[f"{resnet_prefix}.norm1.norm_layer.weight"], f"{diffusers_resnet_prefix}.norm1.norm_layer.bias": checkpoint[f"{resnet_prefix}.norm1.norm_layer.bias"], f"{diffusers_resnet_prefix}.norm1.conv_y.weight": checkpoint[f"{resnet_prefix}.norm1.conv_y.weight"], f"{diffusers_resnet_prefix}.norm1.conv_y.bias": checkpoint[f"{resnet_prefix}.norm1.conv_y.bias"], f"{diffusers_resnet_prefix}.norm1.conv_b.weight": checkpoint[f"{resnet_prefix}.norm1.conv_b.weight"], f"{diffusers_resnet_prefix}.norm1.conv_b.bias": checkpoint[f"{resnet_prefix}.norm1.conv_b.bias"], # conv1 f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"], f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"], # norm2 f"{diffusers_resnet_prefix}.norm2.norm_layer.weight": checkpoint[f"{resnet_prefix}.norm2.norm_layer.weight"], f"{diffusers_resnet_prefix}.norm2.norm_layer.bias": checkpoint[f"{resnet_prefix}.norm2.norm_layer.bias"], f"{diffusers_resnet_prefix}.norm2.conv_y.weight": checkpoint[f"{resnet_prefix}.norm2.conv_y.weight"], f"{diffusers_resnet_prefix}.norm2.conv_y.bias": checkpoint[f"{resnet_prefix}.norm2.conv_y.bias"], f"{diffusers_resnet_prefix}.norm2.conv_b.weight": checkpoint[f"{resnet_prefix}.norm2.conv_b.weight"], f"{diffusers_resnet_prefix}.norm2.conv_b.bias": checkpoint[f"{resnet_prefix}.norm2.conv_b.bias"], # conv2 f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"], } if resnet.conv_shortcut is not None: rv.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"], f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"], } ) return rv def movq_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix): return { # norm f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"], f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"], # query f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.q.bias"], # key f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.k.bias"], # value f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0], 
f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.v.bias"], # proj_attn f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } def movq_attention_to_diffusers_checkpoint_spatial_norm(checkpoint, *, diffusers_attention_prefix, attention_prefix): return { # norm f"{diffusers_attention_prefix}.spatial_norm.norm_layer.weight": checkpoint[ f"{attention_prefix}.norm.norm_layer.weight" ], f"{diffusers_attention_prefix}.spatial_norm.norm_layer.bias": checkpoint[ f"{attention_prefix}.norm.norm_layer.bias" ], f"{diffusers_attention_prefix}.spatial_norm.conv_y.weight": checkpoint[ f"{attention_prefix}.norm.conv_y.weight" ], f"{diffusers_attention_prefix}.spatial_norm.conv_y.bias": checkpoint[f"{attention_prefix}.norm.conv_y.bias"], f"{diffusers_attention_prefix}.spatial_norm.conv_b.weight": checkpoint[ f"{attention_prefix}.norm.conv_b.weight" ], f"{diffusers_attention_prefix}.spatial_norm.conv_b.bias": checkpoint[f"{attention_prefix}.norm.conv_b.bias"], # query f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.q.bias"], # key f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.k.bias"], # value f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.v.bias"], # proj_attn f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][:, :, 0, 0], f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"], } def movq_original_checkpoint_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} diffusers_checkpoint.update(movq_encoder_to_diffusers_checkpoint(model, checkpoint)) # quant_conv diffusers_checkpoint.update( { "quant_conv.weight": checkpoint["quant_conv.weight"], "quant_conv.bias": checkpoint["quant_conv.bias"], } ) # quantize diffusers_checkpoint.update({"quantize.embedding.weight": checkpoint["quantize.embedding.weight"]}) # post_quant_conv diffusers_checkpoint.update( { "post_quant_conv.weight": checkpoint["post_quant_conv.weight"], "post_quant_conv.bias": checkpoint["post_quant_conv.bias"], } ) # decoder diffusers_checkpoint.update(movq_decoder_to_diffusers_checkpoint(model, checkpoint)) return diffusers_checkpoint def movq(*, args, checkpoint_map_location): print("loading movq") movq_checkpoint = torch.load(args.movq_checkpoint_path, map_location=checkpoint_map_location) movq_model = movq_model_from_original_config() movq_diffusers_checkpoint = movq_original_checkpoint_to_diffusers_checkpoint(movq_model, movq_checkpoint) del movq_checkpoint load_checkpoint_to_model(movq_diffusers_checkpoint, movq_model, strict=True) print("done loading movq") return movq_model def load_checkpoint_to_model(checkpoint, model, strict=False): with tempfile.NamedTemporaryFile(delete=False) as file: torch.save(checkpoint, file.name) del checkpoint if strict: model.load_state_dict(torch.load(file.name), strict=True) else: load_checkpoint_and_dispatch(model, file.name, device_map="auto") os.remove(file.name) if __name__ == "__main__": parser = argparse.ArgumentParser() 
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--prior_checkpoint_path", default=None, type=str, required=False, help="Path to the prior checkpoint to convert.", ) parser.add_argument( "--clip_stat_path", default=None, type=str, required=False, help="Path to the clip stats checkpoint to convert.", ) parser.add_argument( "--text2img_checkpoint_path", default=None, type=str, required=False, help="Path to the text2img checkpoint to convert.", ) parser.add_argument( "--movq_checkpoint_path", default=None, type=str, required=False, help="Path to the text2img checkpoint to convert.", ) parser.add_argument( "--inpaint_text2img_checkpoint_path", default=None, type=str, required=False, help="Path to the inpaint text2img checkpoint to convert.", ) parser.add_argument( "--checkpoint_load_device", default="cpu", type=str, required=False, help="The device passed to `map_location` when loading checkpoints.", ) parser.add_argument( "--debug", default=None, type=str, required=False, help="Only run a specific stage of the convert script. Used for debugging", ) args = parser.parse_args() print(f"loading checkpoints to {args.checkpoint_load_device}") checkpoint_map_location = torch.device(args.checkpoint_load_device) if args.debug is not None: print(f"debug: only executing {args.debug}") if args.debug is None: print("to-do") elif args.debug == "prior": prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location) prior_model.save_pretrained(args.dump_path) elif args.debug == "text2img": unet_model = text2img(args=args, checkpoint_map_location=checkpoint_map_location) unet_model.save_pretrained(f"{args.dump_path}/unet") elif args.debug == "inpaint_text2img": inpaint_unet_model = inpaint_text2img(args=args, checkpoint_map_location=checkpoint_map_location) inpaint_unet_model.save_pretrained(f"{args.dump_path}/inpaint_unet") elif args.debug == "decoder": decoder = movq(args=args, checkpoint_map_location=checkpoint_map_location) decoder.save_pretrained(f"{args.dump_path}/decoder") else: raise ValueError(f"unknown debug value : {args.debug}")
diffusers/scripts/convert_kandinsky_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_kandinsky_to_diffusers.py", "repo_id": "diffusers", "token_count": 23602 }
118
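The trickiest step in the conversion script above is splitting the fused attention projections, so here is a small self-contained sketch (separate from the script, with made-up dimensions) that re-implements the same round-robin row split performed by `split_attentions` and checks the resulting q/k/v shapes on dummy tensors.

# Standalone illustration of the round-robin splitting used for fused projections:
# consecutive blocks of `chunk_size` rows are distributed across the `split` outputs,
# which undoes the per-head interleaving of the fused q/k/v weight.
import torch


def split_rows(*, weight, bias, split, chunk_size):
    weights, biases = [None] * split, [None] * split
    idx = 0
    for start in range(0, weight.shape[0], chunk_size):
        rows = torch.arange(start, start + chunk_size)
        w_rows, b_rows = weight[rows, :], bias[rows]
        if weights[idx] is None:
            weights[idx], biases[idx] = w_rows, b_rows
        else:
            weights[idx] = torch.cat([weights[idx], w_rows])
            biases[idx] = torch.cat([biases[idx], b_rows])
        idx = (idx + 1) % split
    return weights, biases


head_dim, num_heads = 4, 2                             # illustrative sizes, not the real model config
model_dim = head_dim * num_heads
fused_weight = torch.randn(3 * model_dim, model_dim)   # stands in for a c_qkv.weight
fused_bias = torch.randn(3 * model_dim)                # stands in for a c_qkv.bias

(q_w, k_w, v_w), (q_b, k_b, v_b) = split_rows(
    weight=fused_weight, bias=fused_bias, split=3, chunk_size=head_dim
)
assert q_w.shape == (model_dim, model_dim) and q_b.shape == (model_dim,)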
# Run this script to convert the Stable Cascade model weights to a diffusers pipeline. import argparse from contextlib import nullcontext import torch from safetensors.torch import load_file from transformers import ( AutoTokenizer, CLIPConfig, CLIPImageProcessor, CLIPTextModelWithProjection, CLIPVisionModelWithProjection, ) from diffusers import ( DDPMWuerstchenScheduler, StableCascadeCombinedPipeline, StableCascadeDecoderPipeline, StableCascadePriorPipeline, ) from diffusers.loaders.single_file_utils import convert_stable_cascade_unet_single_file_to_diffusers from diffusers.models import StableCascadeUNet from diffusers.models.modeling_utils import load_model_dict_into_meta from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils import is_accelerate_available if is_accelerate_available(): from accelerate import init_empty_weights parser = argparse.ArgumentParser(description="Convert Stable Cascade model weights to a diffusers pipeline") parser.add_argument("--model_path", type=str, help="Location of Stable Cascade weights") parser.add_argument( "--stage_c_name", type=str, default="stage_c_lite.safetensors", help="Name of stage c checkpoint file" ) parser.add_argument( "--stage_b_name", type=str, default="stage_b_lite.safetensors", help="Name of stage b checkpoint file" ) parser.add_argument("--skip_stage_c", action="store_true", help="Skip converting stage c") parser.add_argument("--skip_stage_b", action="store_true", help="Skip converting stage b") parser.add_argument("--use_safetensors", action="store_true", help="Use SafeTensors for conversion") parser.add_argument( "--prior_output_path", default="stable-cascade-prior-lite", type=str, help="Hub organization to save the pipelines to", ) parser.add_argument( "--decoder_output_path", type=str, default="stable-cascade-decoder-lite", help="Hub organization to save the pipelines to", ) parser.add_argument( "--combined_output_path", type=str, default="stable-cascade-combined-lite", help="Hub organization to save the pipelines to", ) parser.add_argument("--save_combined", action="store_true") parser.add_argument("--push_to_hub", action="store_true", help="Push to hub") parser.add_argument("--variant", type=str, help="Set to bf16 to save bfloat16 weights") args = parser.parse_args() if args.skip_stage_b and args.skip_stage_c: raise ValueError("At least one stage should be converted") if (args.skip_stage_b or args.skip_stage_c) and args.save_combined: raise ValueError("Cannot skip stages when creating a combined pipeline") model_path = args.model_path device = "cpu" if args.variant == "bf16": dtype = torch.bfloat16 else: dtype = torch.float32 # set paths to model weights prior_checkpoint_path = f"{model_path}/{args.stage_c_name}" decoder_checkpoint_path = f"{model_path}/{args.stage_b_name}" # Clip Text encoder and tokenizer config = CLIPConfig.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") config.text_config.projection_dim = config.projection_dim text_encoder = CLIPTextModelWithProjection.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", config=config.text_config ) tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") # image processor feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") # scheduler for prior and decoder scheduler = DDPMWuerstchenScheduler() ctx = init_empty_weights if is_accelerate_available() else nullcontext if not args.skip_stage_c: # Prior if args.use_safetensors: 
prior_orig_state_dict = load_file(prior_checkpoint_path, device=device) else: prior_orig_state_dict = torch.load(prior_checkpoint_path, map_location=device) prior_state_dict = convert_stable_cascade_unet_single_file_to_diffusers(prior_orig_state_dict) with ctx(): prior_model = StableCascadeUNet( in_channels=16, out_channels=16, timestep_ratio_embedding_dim=64, patch_size=1, conditioning_dim=1536, block_out_channels=[1536, 1536], num_attention_heads=[24, 24], down_num_layers_per_block=[4, 12], up_num_layers_per_block=[12, 4], down_blocks_repeat_mappers=[1, 1], up_blocks_repeat_mappers=[1, 1], block_types_per_layer=[ ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], clip_text_in_channels=1280, clip_text_pooled_in_channels=1280, clip_image_in_channels=768, clip_seq=4, kernel_size=3, dropout=[0.1, 0.1], self_attn=True, timestep_conditioning_type=["sca", "crp"], switch_level=[False], ) if is_accelerate_available(): load_model_dict_into_meta(prior_model, prior_state_dict) else: prior_model.load_state_dict(prior_state_dict) # Prior pipeline prior_pipeline = StableCascadePriorPipeline( prior=prior_model, tokenizer=tokenizer, text_encoder=text_encoder, image_encoder=image_encoder, scheduler=scheduler, feature_extractor=feature_extractor, ) prior_pipeline.to(dtype).save_pretrained( args.prior_output_path, push_to_hub=args.push_to_hub, variant=args.variant ) if not args.skip_stage_b: # Decoder if args.use_safetensors: decoder_orig_state_dict = load_file(decoder_checkpoint_path, device=device) else: decoder_orig_state_dict = torch.load(decoder_checkpoint_path, map_location=device) decoder_state_dict = convert_stable_cascade_unet_single_file_to_diffusers(decoder_orig_state_dict) with ctx(): decoder = StableCascadeUNet( in_channels=4, out_channels=4, timestep_ratio_embedding_dim=64, patch_size=2, conditioning_dim=1280, block_out_channels=[320, 576, 1152, 1152], down_num_layers_per_block=[2, 4, 14, 4], up_num_layers_per_block=[4, 14, 4, 2], down_blocks_repeat_mappers=[1, 1, 1, 1], up_blocks_repeat_mappers=[2, 2, 2, 2], num_attention_heads=[0, 9, 18, 18], block_types_per_layer=[ ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], clip_text_pooled_in_channels=1280, clip_seq=4, effnet_in_channels=16, pixel_mapper_in_channels=3, kernel_size=3, dropout=[0, 0, 0.1, 0.1], self_attn=True, timestep_conditioning_type=["sca"], ) if is_accelerate_available(): load_model_dict_into_meta(decoder, decoder_state_dict) else: decoder.load_state_dict(decoder_state_dict) # VQGAN from Wuerstchen-V2 vqmodel = PaellaVQModel.from_pretrained("warp-ai/wuerstchen", subfolder="vqgan") # Decoder pipeline decoder_pipeline = StableCascadeDecoderPipeline( decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, vqgan=vqmodel, scheduler=scheduler ) decoder_pipeline.to(dtype).save_pretrained( args.decoder_output_path, push_to_hub=args.push_to_hub, variant=args.variant ) if args.save_combined: # Stable Cascade combined pipeline stable_cascade_pipeline = StableCascadeCombinedPipeline( # Decoder text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqmodel, # Prior prior_text_encoder=text_encoder, prior_tokenizer=tokenizer, prior_prior=prior_model, prior_scheduler=scheduler, prior_image_encoder=image_encoder, 
        prior_feature_extractor=feature_extractor,
    )

    stable_cascade_pipeline.to(dtype).save_pretrained(
        args.combined_output_path, push_to_hub=args.push_to_hub, variant=args.variant
    )
diffusers/scripts/convert_stable_cascade_lite.py/0
{ "file_path": "diffusers/scripts/convert_stable_cascade_lite.py", "repo_id": "diffusers", "token_count": 3645 }
119
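Once the prior and decoder have been converted and saved with the default output paths above, they can be reloaded and chained for inference roughly as sketched below. The reload uses the standard `from_pretrained` API on the pipeline classes imported by the script; the inference call signature (`prompt`, `image_embeddings`, `.images`) follows the upstream Stable Cascade pipeline documentation rather than anything in this script, so treat it as an assumption.

import torch

from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

# Paths match the default --prior_output_path / --decoder_output_path above.
prior = StableCascadePriorPipeline.from_pretrained(
    "stable-cascade-prior-lite", torch_dtype=torch.bfloat16
)
decoder = StableCascadeDecoderPipeline.from_pretrained(
    "stable-cascade-decoder-lite", torch_dtype=torch.bfloat16
)

prompt = "an astronaut riding a horse"
prior_output = prior(prompt=prompt)     # stage C: text -> image embeddings (assumed API)
image = decoder(                        # stage B: image embeddings -> image
    image_embeddings=prior_output.image_embeddings, prompt=prompt
).images[0]
image.save("astronaut.png")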
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py To create the package for PyPI. 1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the documentation. If releasing on a special branch, copy the updated README.md on the main branch for the commit you will make for the post-release and run `make fix-copies` on the main branch as well. 2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid. 3. Unpin specific versions from setup.py that use a git install. 4. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the message: "Release: <RELEASE>" and push. 5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs). 6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for PyPI'" Push the tag to git: git push --tags origin v<RELEASE>-release 7. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory (This will build a wheel for the Python version you use to build it). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. Long story cut short, you need to run both before you can upload the distribution to the test PyPI and the actual PyPI servers: python setup.py bdist_wheel && python setup.py sdist 8. Check that everything looks correct by uploading the package to the PyPI test server: twine upload dist/* -r pypitest (pypi suggests using twine as other methods upload files via plaintext.) You may have to specify the repository url, use the following command then: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi diffusers If you are testing from a Colab Notebook, for instance, then do: pip install diffusers && pip uninstall diffusers pip install -i https://testpypi.python.org/pypi diffusers Check you can run the following commands: python -c "from diffusers import __version__; print(__version__)" python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()" python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')" python -c "from diffusers import *" 9. Upload the final version to the actual PyPI: twine upload dist/* -r pypi 10. Prepare the release notes and publish them on GitHub once everything is looking hunky-dory. 11. 
Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release, you need to go back to main before executing this. """ import os import re import sys from distutils.core import Command from setuptools import find_packages, setup # IMPORTANT: # 1. all dependencies should be listed here with their version requirements if any # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py _deps = [ "Pillow", # keep the PIL.Image.Resampling deprecation away "accelerate>=0.11.0", "compel==0.1.8", "datasets", "filelock", "flax>=0.4.1", "hf-doc-builder>=0.3.0", "huggingface-hub>=0.20.2", "requests-mock==1.10.0", "importlib_metadata", "invisible-watermark>=0.2.0", "isort>=5.5.4", "jax>=0.4.1", "jaxlib>=0.4.1", "Jinja2", "k-diffusion>=0.0.12", "torchsde", "note_seq", "librosa", "numpy", "parameterized", "peft>=0.6.0", "protobuf>=3.20.3,<4", "pytest", "pytest-timeout", "pytest-xdist", "python>=3.8.0", "ruff==0.1.5", "safetensors>=0.3.1", "sentencepiece>=0.1.91,!=0.1.92", "GitPython<3.1.19", "scipy", "onnx", "regex!=2019.12.17", "requests", "tensorboard", "torch>=1.4", "torchvision", "transformers>=4.25.1", "urllib3<=2.0.0", ] # this is a lookup table with items like: # # tokenizers: "huggingface-hub==0.8.0" # packaging: "packaging" # # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)} # since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: # # python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets # # Just pass the desired package names to that script as it's shown with 2 packages above. # # If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above # # You can then feed this for example to `pip`: # # pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets) # def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] class DepsTableUpdateCommand(Command): """ A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ( "dep-table-update", None, "updates src/diffusers/dependency_versions_table.py", ), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. 
run `make deps_table_update`", "deps = {", entries, "}", "", ] target = "src/diffusers/dependency_versions_table.py" print(f"updating {target}") with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content)) extras = {} extras["quality"] = deps_list("urllib3", "isort", "ruff", "hf-doc-builder") extras["docs"] = deps_list("hf-doc-builder") extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2", "peft") extras["test"] = deps_list( "compel", "GitPython", "datasets", "Jinja2", "invisible-watermark", "k-diffusion", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock", "safetensors", "sentencepiece", "scipy", "torchvision", "transformers", ) extras["torch"] = deps_list("torch", "accelerate") if os.name == "nt": # windows extras["flax"] = [] # jax is not supported on windows else: extras["flax"] = deps_list("jax", "jaxlib", "flax") extras["dev"] = ( extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"] ) install_requires = [ deps["importlib_metadata"], deps["filelock"], deps["huggingface-hub"], deps["numpy"], deps["regex"], deps["requests"], deps["safetensors"], deps["Pillow"], ] version_range_max = max(sys.version_info[1], 10) + 1 setup( name="diffusers", version="0.28.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) description="State-of-the-art diffusion in PyTorch and JAX.", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning diffusion jax pytorch stable diffusion audioldm", license="Apache 2.0 License", author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)", author_email="[email protected]", url="https://github.com/huggingface/diffusers", package_dir={"": "src"}, packages=find_packages("src"), package_data={"diffusers": ["py.typed"]}, include_package_data=True, python_requires=">=3.8.0", install_requires=list(install_requires), extras_require=extras, entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]}, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Programming Language :: Python :: 3", ] + [f"Programming Language :: Python :: 3.{i}" for i in range(8, version_range_max)], cmdclass={"deps_table_update": DepsTableUpdateCommand}, )
diffusers/setup.py/0
{ "file_path": "diffusers/setup.py", "repo_id": "diffusers", "token_count": 3764 }
120
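The `deps` lookup built near the top of this setup script is worth seeing in isolation. The sketch below (using a made-up three-entry sample rather than the real `_deps` list) applies the same regex to show how pinned and unpinned requirement strings both end up keyed by bare package name, which is what `deps_list` relies on.

# Standalone illustration of the name -> requirement-specifier table construction.
import re

_deps_sample = ["accelerate>=0.11.0", "Jinja2", "protobuf>=3.20.3,<4"]
deps = {
    b: a
    for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps_sample)
}

assert deps["accelerate"] == "accelerate>=0.11.0"
assert deps["Jinja2"] == "Jinja2"               # unversioned entries map to themselves
assert deps["protobuf"] == "protobuf>=3.20.3,<4"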
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub.utils import validate_hf_hub_args from .single_file_utils import ( create_diffusers_controlnet_model_from_ldm, fetch_ldm_config_and_checkpoint, ) class FromOriginalControlNetMixin: """ Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`]. """ @classmethod @validate_hf_hub_args def from_single_file(cls, pretrained_model_link_or_path, **kwargs): r""" Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A link to the `.ckpt` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub. - A path to a *file* containing all pipeline weights. config_file (`str`, *optional*): Filepath to the configuration YAML file associated with the model. If not provided it will default to: https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to True, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. image_size (`int`, *optional*, defaults to 512): The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable Diffusion v2 base model. Use 768 for Stable Diffusion v2. 
upcast_attention (`bool`, *optional*, defaults to `None`): Whether the attention computation should always be upcasted. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (for example the pipeline components of the specific pipeline class). The overwritten components are directly passed to the pipelines `__init__` method. See example below for more information. Examples: ```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path model = ControlNetModel.from_single_file(url) url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet) ``` """ original_config_file = kwargs.pop("original_config_file", None) config_file = kwargs.pop("config_file", None) resume_download = kwargs.pop("resume_download", False) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) cache_dir = kwargs.pop("cache_dir", None) local_files_only = kwargs.pop("local_files_only", None) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) class_name = cls.__name__ if (config_file is not None) and (original_config_file is not None): raise ValueError( "You cannot pass both `config_file` and `original_config_file` to `from_single_file`. Please use only one of these arguments." ) original_config_file = config_file or original_config_file original_config, checkpoint = fetch_ldm_config_and_checkpoint( pretrained_model_link_or_path=pretrained_model_link_or_path, class_name=class_name, original_config_file=original_config_file, resume_download=resume_download, force_download=force_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, cache_dir=cache_dir, ) upcast_attention = kwargs.pop("upcast_attention", False) image_size = kwargs.pop("image_size", None) component = create_diffusers_controlnet_model_from_ldm( class_name, original_config, checkpoint, upcast_attention=upcast_attention, image_size=image_size, torch_dtype=torch_dtype, ) controlnet = component["controlnet"] if torch_dtype is not None: controlnet = controlnet.to(torch_dtype) return controlnet
# --- Source file above: diffusers/src/diffusers/loaders/controlnet.py (repo: diffusers) ---
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from importlib import import_module from typing import Callable, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..image_processor import IPAdapterMaskProcessor from ..utils import deprecate, logging from ..utils.import_utils import is_xformers_available from ..utils.torch_utils import maybe_allow_in_graph from .lora import LoRALinearLayer logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_xformers_available(): import xformers import xformers.ops else: xformers = None @maybe_allow_in_graph class Attention(nn.Module): r""" A cross attention layer. Parameters: query_dim (`int`): The number of channels in the query. cross_attention_dim (`int`, *optional*): The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention. dim_head (`int`, *optional*, defaults to 64): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. bias (`bool`, *optional*, defaults to False): Set to `True` for the query, key, and value linear layers to contain a bias parameter. upcast_attention (`bool`, *optional*, defaults to False): Set to `True` to upcast the attention computation to `float32`. upcast_softmax (`bool`, *optional*, defaults to False): Set to `True` to upcast the softmax computation to `float32`. cross_attention_norm (`str`, *optional*, defaults to `None`): The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. cross_attention_norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the group norm in the cross attention. added_kv_proj_dim (`int`, *optional*, defaults to `None`): The number of channels to use for the added key and value projections. If `None`, no projection is used. norm_num_groups (`int`, *optional*, defaults to `None`): The number of groups to use for the group norm in the attention. spatial_norm_dim (`int`, *optional*, defaults to `None`): The number of channels to use for the spatial normalization. out_bias (`bool`, *optional*, defaults to `True`): Set to `True` to use a bias in the output linear layer. scale_qk (`bool`, *optional*, defaults to `True`): Set to `True` to scale the query and key by `1 / sqrt(dim_head)`. only_cross_attention (`bool`, *optional*, defaults to `False`): Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if `added_kv_proj_dim` is not `None`. eps (`float`, *optional*, defaults to 1e-5): An additional value added to the denominator in group normalization that is used for numerical stability. rescale_output_factor (`float`, *optional*, defaults to 1.0): A factor to rescale the output by dividing it with this value. 
residual_connection (`bool`, *optional*, defaults to `False`): Set to `True` to add the residual connection to the output. _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`): Set to `True` if the attention block is loaded from a deprecated state dict. processor (`AttnProcessor`, *optional*, defaults to `None`): The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and `AttnProcessor` otherwise. """ def __init__( self, query_dim: int, cross_attention_dim: Optional[int] = None, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, upcast_attention: bool = False, upcast_softmax: bool = False, cross_attention_norm: Optional[str] = None, cross_attention_norm_num_groups: int = 32, added_kv_proj_dim: Optional[int] = None, norm_num_groups: Optional[int] = None, spatial_norm_dim: Optional[int] = None, out_bias: bool = True, scale_qk: bool = True, only_cross_attention: bool = False, eps: float = 1e-5, rescale_output_factor: float = 1.0, residual_connection: bool = False, _from_deprecated_attn_block: bool = False, processor: Optional["AttnProcessor"] = None, out_dim: int = None, ): super().__init__() self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.use_bias = bias self.is_cross_attention = cross_attention_dim is not None self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim self.upcast_attention = upcast_attention self.upcast_softmax = upcast_softmax self.rescale_output_factor = rescale_output_factor self.residual_connection = residual_connection self.dropout = dropout self.fused_projections = False self.out_dim = out_dim if out_dim is not None else query_dim # we make use of this private variable to know whether this class is loaded # with an deprecated state dict so that we can convert it on the fly self._from_deprecated_attn_block = _from_deprecated_attn_block self.scale_qk = scale_qk self.scale = dim_head**-0.5 if self.scale_qk else 1.0 self.heads = out_dim // dim_head if out_dim is not None else heads # for slice_size > 0 the attention score computation # is split across the batch axis to save memory # You can set slice_size with `set_attention_slice` self.sliceable_head_dim = heads self.added_kv_proj_dim = added_kv_proj_dim self.only_cross_attention = only_cross_attention if self.added_kv_proj_dim is None and self.only_cross_attention: raise ValueError( "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`." ) if norm_num_groups is not None: self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True) else: self.group_norm = None if spatial_norm_dim is not None: self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim) else: self.spatial_norm = None if cross_attention_norm is None: self.norm_cross = None elif cross_attention_norm == "layer_norm": self.norm_cross = nn.LayerNorm(self.cross_attention_dim) elif cross_attention_norm == "group_norm": if self.added_kv_proj_dim is not None: # The given `encoder_hidden_states` are initially of shape # (batch_size, seq_len, added_kv_proj_dim) before being projected # to (batch_size, seq_len, cross_attention_dim). The norm is applied # before the projection, so we need to use `added_kv_proj_dim` as # the number of channels for the group norm. 
norm_cross_num_channels = added_kv_proj_dim else: norm_cross_num_channels = self.cross_attention_dim self.norm_cross = nn.GroupNorm( num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True ) else: raise ValueError( f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'" ) linear_cls = nn.Linear self.linear_cls = linear_cls self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias) if not self.only_cross_attention: # only relevant for the `AddedKVProcessor` classes self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) else: self.to_k = None self.to_v = None if self.added_kv_proj_dim is not None: self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim) self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim) self.to_out = nn.ModuleList([]) self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(nn.Dropout(dropout)) # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 if processor is None: processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_use_memory_efficient_attention_xformers( self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None ) -> None: r""" Set whether to use memory efficient attention from `xformers` or not. Args: use_memory_efficient_attention_xformers (`bool`): Whether to use memory efficient attention from `xformers` or not. attention_op (`Callable`, *optional*): The attention operation to use. Defaults to `None` which uses the default attention operation from `xformers`. """ is_lora = hasattr(self, "processor") and isinstance( self.processor, LORA_ATTENTION_PROCESSORS, ) is_custom_diffusion = hasattr(self, "processor") and isinstance( self.processor, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0), ) is_added_kv_processor = hasattr(self, "processor") and isinstance( self.processor, ( AttnAddedKVProcessor, AttnAddedKVProcessor2_0, SlicedAttnAddedKVProcessor, XFormersAttnAddedKVProcessor, LoRAAttnAddedKVProcessor, ), ) if use_memory_efficient_attention_xformers: if is_added_kv_processor and (is_lora or is_custom_diffusion): raise NotImplementedError( f"Memory efficient attention is currently not supported for LoRA or custom diffusion for attention processor type {self.processor}" ) if not is_xformers_available(): raise ModuleNotFoundError( ( "Refer to https://github.com/facebookresearch/xformers for more information on how to install" " xformers" ), name="xformers", ) elif not torch.cuda.is_available(): raise ValueError( "torch.cuda.is_available() should be True but is False. 
xformers' memory efficient attention is" " only available for GPU " ) else: try: # Make sure we can run the memory efficient attention _ = xformers.ops.memory_efficient_attention( torch.randn((1, 2, 40), device="cuda"), torch.randn((1, 2, 40), device="cuda"), torch.randn((1, 2, 40), device="cuda"), ) except Exception as e: raise e if is_lora: # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0? processor = LoRAXFormersAttnProcessor( hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, rank=self.processor.rank, attention_op=attention_op, ) processor.load_state_dict(self.processor.state_dict()) processor.to(self.processor.to_q_lora.up.weight.device) elif is_custom_diffusion: processor = CustomDiffusionXFormersAttnProcessor( train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, attention_op=attention_op, ) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, "to_k_custom_diffusion"): processor.to(self.processor.to_k_custom_diffusion.weight.device) elif is_added_kv_processor: # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP # which uses this type of cross attention ONLY because the attention mask of format # [0, ..., -10.000, ..., 0, ...,] is not supported # throw warning logger.info( "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation." ) processor = XFormersAttnAddedKVProcessor(attention_op=attention_op) else: processor = XFormersAttnProcessor(attention_op=attention_op) else: if is_lora: attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor ) processor = attn_processor_class( hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, rank=self.processor.rank, ) processor.load_state_dict(self.processor.state_dict()) processor.to(self.processor.to_q_lora.up.weight.device) elif is_custom_diffusion: attn_processor_class = ( CustomDiffusionAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else CustomDiffusionAttnProcessor ) processor = attn_processor_class( train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, ) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, "to_k_custom_diffusion"): processor.to(self.processor.to_k_custom_diffusion.weight.device) else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_attention_slice(self, slice_size: int) -> None: r""" Set the slice size for attention computation. Args: slice_size (`int`): The slice size for attention computation. 
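
        Example (editor's illustrative sketch; `attn` is assumed to be an existing `Attention`
        instance and the slice size is an arbitrary choice):

        ```py
        # Compute attention in chunks of 2 along the (batch * heads) axis to lower peak memory.
        attn.set_attention_slice(2)
        ```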
""" if slice_size is not None and slice_size > self.sliceable_head_dim: raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") if slice_size is not None and self.added_kv_proj_dim is not None: processor = SlicedAttnAddedKVProcessor(slice_size) elif slice_size is not None: processor = SlicedAttnProcessor(slice_size) elif self.added_kv_proj_dim is not None: processor = AttnAddedKVProcessor() else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_processor(self, processor: "AttnProcessor") -> None: r""" Set the attention processor to use. Args: processor (`AttnProcessor`): The attention processor to use. """ # if current processor is in `self._modules` and if passed `processor` is not, we need to # pop `processor` from `self._modules` if ( hasattr(self, "processor") and isinstance(self.processor, torch.nn.Module) and not isinstance(processor, torch.nn.Module) ): logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}") self._modules.pop("processor") self.processor = processor def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": r""" Get the attention processor in use. Args: return_deprecated_lora (`bool`, *optional*, defaults to `False`): Set to `True` to return the deprecated LoRA attention processor. Returns: "AttentionProcessor": The attention processor in use. """ if not return_deprecated_lora: return self.processor # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible # serialization format for LoRA Attention Processors. It should be deleted once the integration # with PEFT is completed. is_lora_activated = { name: module.lora_layer is not None for name, module in self.named_modules() if hasattr(module, "lora_layer") } # 1. if no layer has a LoRA activated we can return the processor as usual if not any(is_lora_activated.values()): return self.processor # If doesn't apply LoRA do `add_k_proj` or `add_v_proj` is_lora_activated.pop("add_k_proj", None) is_lora_activated.pop("add_v_proj", None) # 2. else it is not possible that only some layers have LoRA activated if not all(is_lora_activated.values()): raise ValueError( f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}" ) # 3. 
And we need to merge the current LoRA layers into the corresponding LoRA attention processor non_lora_processor_cls_name = self.processor.__class__.__name__ lora_processor_cls = getattr(import_module(__name__), "LoRA" + non_lora_processor_cls_name) hidden_size = self.inner_dim # now create a LoRA attention processor from the LoRA layers if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]: kwargs = { "cross_attention_dim": self.cross_attention_dim, "rank": self.to_q.lora_layer.rank, "network_alpha": self.to_q.lora_layer.network_alpha, "q_rank": self.to_q.lora_layer.rank, "q_hidden_size": self.to_q.lora_layer.out_features, "k_rank": self.to_k.lora_layer.rank, "k_hidden_size": self.to_k.lora_layer.out_features, "v_rank": self.to_v.lora_layer.rank, "v_hidden_size": self.to_v.lora_layer.out_features, "out_rank": self.to_out[0].lora_layer.rank, "out_hidden_size": self.to_out[0].lora_layer.out_features, } if hasattr(self.processor, "attention_op"): kwargs["attention_op"] = self.processor.attention_op lora_processor = lora_processor_cls(hidden_size, **kwargs) lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict()) elif lora_processor_cls == LoRAAttnAddedKVProcessor: lora_processor = lora_processor_cls( hidden_size, cross_attention_dim=self.add_k_proj.weight.shape[0], rank=self.to_q.lora_layer.rank, network_alpha=self.to_q.lora_layer.network_alpha, ) lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict()) # only save if used if self.add_k_proj.lora_layer is not None: lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict()) lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict()) else: lora_processor.add_k_proj_lora = None lora_processor.add_v_proj_lora = None else: raise ValueError(f"{lora_processor_cls} does not exist.") return lora_processor def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, **cross_attention_kwargs, ) -> torch.Tensor: r""" The forward method of the `Attention` class. Args: hidden_states (`torch.Tensor`): The hidden states of the query. encoder_hidden_states (`torch.Tensor`, *optional*): The hidden states of the encoder. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. **cross_attention_kwargs: Additional keyword arguments to pass along to the cross attention. Returns: `torch.Tensor`: The output of the attention layer. 
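
        Example (editor's illustrative sketch; the shapes and sizes are arbitrary, and the import
        path assumes this module is `diffusers.models.attention_processor`):

        ```py
        import torch

        from diffusers.models.attention_processor import Attention

        attn = Attention(query_dim=64, heads=4, dim_head=16)
        hidden_states = torch.randn(2, 77, 64)  # (batch_size, seq_len, query_dim)
        out = attn(hidden_states)  # self-attention; `out` has shape (2, 77, 64)
        ```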
""" # The `Attention` class can call different attention processors / attention functions # here we simply pass along all tensors to the selected processor class # For standard processors that are defined here, `**cross_attention_kwargs` is empty attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) unused_kwargs = [k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"cross_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." ) cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters} return self.processor( self, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs, ) def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: r""" Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads` is the number of heads initialized while constructing the `Attention` class. Args: tensor (`torch.Tensor`): The tensor to reshape. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads batch_size, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: r""" Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is the number of heads initialized while constructing the `Attention` class. Args: tensor (`torch.Tensor`): The tensor to reshape. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is reshaped to `[batch_size * heads, seq_len, dim // heads]`. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads if tensor.ndim == 3: batch_size, seq_len, dim = tensor.shape extra_dim = 1 else: batch_size, extra_dim, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3) if out_dim == 3: tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size) return tensor def get_attention_scores( self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None ) -> torch.Tensor: r""" Compute the attention scores. Args: query (`torch.Tensor`): The query tensor. key (`torch.Tensor`): The key tensor. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. Returns: `torch.Tensor`: The attention probabilities/scores. 
""" dtype = query.dtype if self.upcast_attention: query = query.float() key = key.float() if attention_mask is None: baddbmm_input = torch.empty( query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device ) beta = 0 else: baddbmm_input = attention_mask beta = 1 attention_scores = torch.baddbmm( baddbmm_input, query, key.transpose(-1, -2), beta=beta, alpha=self.scale, ) del baddbmm_input if self.upcast_softmax: attention_scores = attention_scores.float() attention_probs = attention_scores.softmax(dim=-1) del attention_scores attention_probs = attention_probs.to(dtype) return attention_probs def prepare_attention_mask( self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 ) -> torch.Tensor: r""" Prepare the attention mask for the attention computation. Args: attention_mask (`torch.Tensor`): The attention mask to prepare. target_length (`int`): The target length of the attention mask. This is the length of the attention mask after padding. batch_size (`int`): The batch size, which is used to repeat the attention mask. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the attention mask. Can be either `3` or `4`. Returns: `torch.Tensor`: The prepared attention mask. """ head_size = self.heads if attention_mask is None: return attention_mask current_length: int = attention_mask.shape[-1] if current_length != target_length: if attention_mask.device.type == "mps": # HACK: MPS: Does not support padding by greater than dimension of input tensor. # Instead, we can manually construct the padding tensor. padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, padding], dim=2) else: # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: # we want to instead pad by (0, remaining_length), where remaining_length is: # remaining_length: int = target_length - current_length # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) if out_dim == 3: if attention_mask.shape[0] < batch_size * head_size: attention_mask = attention_mask.repeat_interleave(head_size, dim=0) elif out_dim == 4: attention_mask = attention_mask.unsqueeze(1) attention_mask = attention_mask.repeat_interleave(head_size, dim=1) return attention_mask def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: r""" Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the `Attention` class. Args: encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. Returns: `torch.Tensor`: The normalized encoder hidden states. """ assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states" if isinstance(self.norm_cross, nn.LayerNorm): encoder_hidden_states = self.norm_cross(encoder_hidden_states) elif isinstance(self.norm_cross, nn.GroupNorm): # Group norm norms along the channels dimension and expects # input to be in the shape of (N, C, *). 
In this case, we want # to norm along the hidden dimension, so we need to move # (batch_size, sequence_length, hidden_size) -> # (batch_size, hidden_size, sequence_length) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) encoder_hidden_states = self.norm_cross(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) else: assert False return encoder_hidden_states @torch.no_grad() def fuse_projections(self, fuse=True): device = self.to_q.weight.data.device dtype = self.to_q.weight.data.dtype if not self.is_cross_attention: # fetch weight matrices. concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] # create a new single projection layer and copy over the weights. self.to_qkv = self.linear_cls(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_qkv.weight.copy_(concatenated_weights) if self.use_bias: concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) self.to_qkv.bias.copy_(concatenated_bias) else: concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_kv = self.linear_cls(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_kv.weight.copy_(concatenated_weights) if self.use_bias: concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) self.to_kv.bias.copy_(concatenated_bias) self.fused_projections = fuse class AttnProcessor: r""" Default processor for performing attention-related computations. """ def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionAttnProcessor(nn.Module): r""" Processor for implementing attention for the Custom Diffusion method. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. """ def __init__( self, train_kv: bool = True, train_q_out: bool = True, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim # `_custom_diffusion` id for easy serialization and loading. 
if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class AttnAddedKVProcessor: r""" Processor for performing attention-related computations with extra learnable key and value matrices for the text encoder. """ def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class AttnAddedKVProcessor2_0: r""" Processor for performing scaled dot-product attention (enabled by default if you're using PyTorch 2.0), with extra learnable key and value matrices for the text encoder. """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query, out_dim=4) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key, out_dim=4) value = attn.head_to_batch_dim(value, out_dim=4) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1]) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class XFormersAttnAddedKVProcessor: r""" Processor for implementing memory efficient attention using xFormers. Args: attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. 
""" def __init__(self, attention_op: Optional[Callable] = None): self.attention_op = attention_op def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class XFormersAttnProcessor: r""" Processor for implementing memory efficient attention using xFormers. Args: attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. """ def __init__(self, attention_op: Optional[Callable] = None): self.attention_op = attention_op def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, key_tokens, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, key_tokens, batch_size) if attention_mask is not None: # expand our mask's singleton query_tokens dimension: # [batch*heads, 1, key_tokens] -> # [batch*heads, query_tokens, key_tokens] # so that it can be added as a bias onto the attention scores that xformers computes: # [batch*heads, query_tokens, key_tokens] # we do this explicitly because xformers doesn't broadcast the singleton dimension for us. _, query_tokens, _ = hidden_states.shape attention_mask = attention_mask.expand(-1, query_tokens, -1) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class AttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class FusedAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). It uses fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is currently 🧪 experimental in nature and can change in future. </Tip> """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "FusedAttnProcessor2_0 requires at least PyTorch 2.0, to use it. Please upgrade PyTorch to > 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) if encoder_hidden_states is None: qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 query, key, value = torch.split(qkv, split_size, dim=-1) else: if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) query = attn.to_q(hidden_states) kv = attn.to_kv(encoder_hidden_states) split_size = kv.shape[-1] // 2 key, value = torch.split(kv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionXFormersAttnProcessor(nn.Module): r""" Processor for implementing memory efficient attention using xFormers for the Custom Diffusion method. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. 
""" def __init__( self, train_kv: bool = True, train_q_out: bool = False, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, attention_op: Optional[Callable] = None, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.attention_op = attention_op # `_custom_diffusion` id for easy serialization and loading. if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CustomDiffusionAttnProcessor2_0(nn.Module): r""" Processor for implementing attention for the Custom Diffusion method using PyTorch 2.0’s memory-efficient scaled dot-product attention. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. 
hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. """ def __init__( self, train_kv: bool = True, train_q_out: bool = True, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim # `_custom_diffusion` id for easy serialization and loading. if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states) else: query = attn.to_q(hidden_states) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() inner_dim = hidden_states.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class 
SlicedAttnProcessor: r""" Processor for implementing sliced attention. Args: slice_size (`int`, *optional*): The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and `attention_head_dim` must be a multiple of the `slice_size`. """ def __init__(self, slice_size: int): self.slice_size = slice_size def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) batch_size_attention, query_tokens, _ = query.shape hidden_states = torch.zeros( (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype ) for i in range(batch_size_attention // self.slice_size): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class SlicedAttnAddedKVProcessor: r""" Processor for implementing sliced attention with extra learnable key and value matrices for the text encoder. Args: slice_size (`int`, *optional*): The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and `attention_head_dim` must be a multiple of the `slice_size`. 
""" def __init__(self, slice_size): self.slice_size = slice_size def __call__( self, attn: "Attention", hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj batch_size_attention, query_tokens, _ = query.shape hidden_states = torch.zeros( (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype ) for i in range(batch_size_attention // self.slice_size): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class SpatialNorm(nn.Module): """ Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. Args: f_channels (`int`): The number of channels for input to group normalization layer, and output of the spatial norm layer. zq_channels (`int`): The number of channels for the quantized vector as described in the paper. 
""" def __init__( self, f_channels: int, zq_channels: int, ): super().__init__() self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True) self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) def forward(self, f: torch.FloatTensor, zq: torch.FloatTensor) -> torch.FloatTensor: f_size = f.shape[-2:] zq = F.interpolate(zq, size=f_size, mode="nearest") norm_f = self.norm_layer(f) new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) return new_f class LoRAAttnProcessor(nn.Module): def __init__( self, hidden_size: int, cross_attention_dim: Optional[int] = None, rank: int = 4, network_alpha: Optional[int] = None, **kwargs, ): deprecation_message = "Using LoRAAttnProcessor is deprecated. Please use the PEFT backend for all things LoRA. You can install PEFT by running `pip install peft`." deprecate("LoRAAttnProcessor", "0.30.0", deprecation_message, standard_warn=False) super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank q_rank = kwargs.pop("q_rank", None) q_hidden_size = kwargs.pop("q_hidden_size", None) q_rank = q_rank if q_rank is not None else rank q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size v_rank = kwargs.pop("v_rank", None) v_hidden_size = kwargs.pop("v_hidden_size", None) v_rank = v_rank if v_rank is not None else rank v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size out_rank = kwargs.pop("out_rank", None) out_hidden_size = kwargs.pop("out_hidden_size", None) out_rank = out_rank if out_rank is not None else rank out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = AttnProcessor() return attn.processor(attn, hidden_states, **kwargs) class LoRAAttnProcessor2_0(nn.Module): def __init__( self, hidden_size: int, cross_attention_dim: Optional[int] = None, rank: int = 4, network_alpha: Optional[int] = None, **kwargs, ): deprecation_message = "Using LoRAAttnProcessor is deprecated. Please use the PEFT backend for all things LoRA. You can install PEFT by running `pip install peft`." 
deprecate("LoRAAttnProcessor2_0", "0.30.0", deprecation_message, standard_warn=False) super().__init__() if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank q_rank = kwargs.pop("q_rank", None) q_hidden_size = kwargs.pop("q_hidden_size", None) q_rank = q_rank if q_rank is not None else rank q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size v_rank = kwargs.pop("v_rank", None) v_hidden_size = kwargs.pop("v_hidden_size", None) v_rank = v_rank if v_rank is not None else rank v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size out_rank = kwargs.pop("out_rank", None) out_hidden_size = kwargs.pop("out_hidden_size", None) out_rank = out_rank if out_rank is not None else rank out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = AttnProcessor2_0() return attn.processor(attn, hidden_states, **kwargs) class LoRAXFormersAttnProcessor(nn.Module): r""" Processor for implementing the LoRA attention mechanism with memory efficient attention using xFormers. Args: hidden_size (`int`, *optional*): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*): The number of channels in the `encoder_hidden_states`. rank (`int`, defaults to 4): The dimension of the LoRA update matrices. attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. network_alpha (`int`, *optional*): Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. kwargs (`dict`): Additional keyword arguments to pass to the `LoRALinearLayer` layers. 
""" def __init__( self, hidden_size: int, cross_attention_dim: int, rank: int = 4, attention_op: Optional[Callable] = None, network_alpha: Optional[int] = None, **kwargs, ): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank self.attention_op = attention_op q_rank = kwargs.pop("q_rank", None) q_hidden_size = kwargs.pop("q_hidden_size", None) q_rank = q_rank if q_rank is not None else rank q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size v_rank = kwargs.pop("v_rank", None) v_hidden_size = kwargs.pop("v_hidden_size", None) v_rank = v_rank if v_rank is not None else rank v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size out_rank = kwargs.pop("out_rank", None) out_hidden_size = kwargs.pop("out_hidden_size", None) out_rank = out_rank if out_rank is not None else rank out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,add_k_proj,add_v_proj,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = XFormersAttnProcessor() return attn.processor(attn, hidden_states, **kwargs) class LoRAAttnAddedKVProcessor(nn.Module): r""" Processor for implementing the LoRA attention mechanism with extra learnable key and value matrices for the text encoder. Args: hidden_size (`int`, *optional*): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. rank (`int`, defaults to 4): The dimension of the LoRA update matrices. network_alpha (`int`, *optional*): Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. kwargs (`dict`): Additional keyword arguments to pass to the `LoRALinearLayer` layers. 
""" def __init__( self, hidden_size: int, cross_attention_dim: Optional[int] = None, rank: int = 4, network_alpha: Optional[int] = None, ): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) self.add_k_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.add_v_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_k_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,add_k_proj,add_v_proj,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = AttnAddedKVProcessor() return attn.processor(attn, hidden_states, **kwargs) class IPAdapterAttnProcessor(nn.Module): r""" Attention processor for Multiple IP-Adapters. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): The context length of the image features. scale (`float` or List[`float`], defaults to 1.0): the weight scale of image prompt. """ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) self.to_v_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, ip_adapter_masks: Optional[torch.FloatTensor] = None, ): residual = hidden_states # separate ip_hidden_states from encoder_hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): encoder_hidden_states, ip_hidden_states = encoder_hidden_states else: deprecation_message = ( "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release." 
" Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning." ) deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]], ) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if ip_adapter_masks is not None: if not isinstance(ip_adapter_masks, torch.Tensor) or ip_adapter_masks.ndim != 4: raise ValueError( " ip_adapter_mask should be a tensor with shape [num_ip_adapter, 1, height, width]." " Please use `IPAdapterMaskProcessor` to preprocess your mask" ) if len(ip_adapter_masks) != len(self.scale): raise ValueError( f"Number of ip_adapter_masks ({len(ip_adapter_masks)}) must match number of IP-Adapters ({len(self.scale)})" ) else: ip_adapter_masks = [None] * len(self.scale) # for ip-adapter for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks ): ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) ip_attention_probs = attn.get_attention_scores(query, ip_key, None) current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states) if mask is not None: mask_downsample = IPAdapterMaskProcessor.downsample( mask, batch_size, current_ip_hidden_states.shape[1], current_ip_hidden_states.shape[2] ) mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device) current_ip_hidden_states = current_ip_hidden_states * mask_downsample hidden_states = hidden_states + scale * current_ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class IPAdapterAttnProcessor2_0(torch.nn.Module): r""" Attention processor for IP-Adapter for PyTorch 2.0. 
Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): The context length of the image features. scale (`float` or `List[float]`, defaults to 1.0): the weight scale of image prompt. """ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." ) self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) self.to_v_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, ip_adapter_masks: Optional[torch.FloatTensor] = None, ): residual = hidden_states # separate ip_hidden_states from encoder_hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): encoder_hidden_states, ip_hidden_states = encoder_hidden_states else: deprecation_message = ( "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release." " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning." 
) deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]], ) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if ip_adapter_masks is not None: if not isinstance(ip_adapter_masks, torch.Tensor) or ip_adapter_masks.ndim != 4: raise ValueError( " ip_adapter_mask should be a tensor with shape [num_ip_adapter, 1, height, width]." 
" Please use `IPAdapterMaskProcessor` to preprocess your mask" ) if len(ip_adapter_masks) != len(self.scale): raise ValueError( f"Number of ip_adapter_masks ({len(ip_adapter_masks)}) must match number of IP-Adapters ({len(self.scale)})" ) else: ip_adapter_masks = [None] * len(self.scale) # for ip-adapter for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks ): ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 current_ip_hidden_states = F.scaled_dot_product_attention( query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False ) current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape( batch_size, -1, attn.heads * head_dim ) current_ip_hidden_states = current_ip_hidden_states.to(query.dtype) if mask is not None: mask_downsample = IPAdapterMaskProcessor.downsample( mask, batch_size, current_ip_hidden_states.shape[1], current_ip_hidden_states.shape[2] ) mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device) current_ip_hidden_states = current_ip_hidden_states * mask_downsample hidden_states = hidden_states + scale * current_ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states LORA_ATTENTION_PROCESSORS = ( LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, LoRAAttnAddedKVProcessor, ) ADDED_KV_ATTENTION_PROCESSORS = ( AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor, LoRAAttnAddedKVProcessor, ) CROSS_ATTENTION_PROCESSORS = ( AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor, SlicedAttnProcessor, LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, ) AttentionProcessor = Union[ AttnProcessor, AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, SlicedAttnProcessor, AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor, CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0, # deprecated LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, LoRAAttnAddedKVProcessor, ]
diffusers/src/diffusers/models/attention_processor.py/0
{ "file_path": "diffusers/src/diffusers/models/attention_processor.py", "repo_id": "diffusers", "token_count": 48583 }
122
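The processors collected in the file above are meant to be swapped at runtime: diffusers models expose `set_attn_processor` and the `attn_processors` property for exactly that. Below is a minimal, hedged sketch (not part of the file itself) of how one of these processors might be attached to a UNet; the checkpoint id and the `slice_size` value are illustrative assumptions.

```python
# Minimal sketch of swapping attention processors on a UNet; the checkpoint id and
# slice size are illustrative assumptions, not values taken from the file above.
import torch
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0, SlicedAttnProcessor

unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
)

# Route every attention module through PyTorch 2.0 scaled dot-product attention.
unet.set_attn_processor(AttnProcessor2_0())

# Or trade speed for memory by computing attention in slices.
unet.set_attn_processor(SlicedAttnProcessor(slice_size=2))

# `attn_processors` maps each attention module name to its current processor.
print(next(iter(unet.attn_processors.items())))
```

Passing a single processor applies it to every attention module; a dict keyed by module name can be passed instead for per-layer control.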
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pickle import UnpicklingError from typing import Any, Dict, Union import jax import jax.numpy as jnp import msgpack.exceptions from flax.core.frozen_dict import FrozenDict, unfreeze from flax.serialization import from_bytes, to_bytes from flax.traverse_util import flatten_dict, unflatten_dict from huggingface_hub import create_repo, hf_hub_download from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, validate_hf_hub_args, ) from requests import HTTPError from .. import __version__, is_torch_available from ..utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, PushToHubMixin, logging, ) from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax logger = logging.get_logger(__name__) class FlaxModelMixin(PushToHubMixin): r""" Base class for all Flax models. [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and saving models. - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`]. """ config_name = CONFIG_NAME _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] _flax_internal_args = ["name", "parent", "dtype"] @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. """ return cls(config, **kwargs) def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any: """ Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`. """ # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27 def conditional_cast(param): if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): param = param.astype(dtype) return param if mask is None: return jax.tree_map(conditional_cast, params) flat_params = flatten_dict(params) flat_mask, _ = jax.tree_flatten(mask) for masked, key in zip(flat_mask, flat_params.keys()): if masked: param = flat_params[key] flat_params[key] = conditional_cast(param) return unflatten_dict(flat_params) def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. 
        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # load model
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")

        >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
        >>> params = model.to_bf16(params)

        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> flat_params = traverse_util.flatten_dict(params)
        >>> mask = {
        ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ... }

        >>> mask = traverse_util.unflatten_dict(mask)
        >>> params = model.to_bf16(params, mask)
        ```"""
        return self._cast_floating_to(params, jnp.bfloat16, mask)

    def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
        model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be
                `True` for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # Download model and configuration from huggingface.co
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")

        >>> # By default, the model params will be in fp32, to illustrate the use of this method,
        >>> # we'll first cast to fp16 and back to fp32
        >>> params = model.to_fp16(params)

        >>> # now cast back to fp32
        >>> params = model.to_fp32(params)
        ```"""
        return self._cast_floating_to(params, jnp.float32, mask)

    def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
        r"""
        Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast
        the `params` in place.

        This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full
        half-precision training or to save weights in float16 for inference in order to save memory and improve
        speed.

        Arguments:
            params (`Union[Dict, FrozenDict]`):
                A `PyTree` of model parameters.
            mask (`Union[Dict, FrozenDict]`):
                A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be
                `True` for params you want to cast, and `False` for those you want to skip.

        Examples:

        ```python
        >>> from diffusers import FlaxUNet2DConditionModel

        >>> # load model
        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")

        >>> # By default, the model params will be in fp32, to cast these to float16
        >>> params = model.to_fp16(params)

        >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
        >>> # then pass the mask as follows
        >>> from flax import traverse_util

        >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> flat_params = traverse_util.flatten_dict(params)
        >>> mask = {
        ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
        ...     for path in flat_params
        ...
} >>> mask = traverse_util.unflatten_dict(mask) >>> params = model.to_fp16(params, mask) ```""" return self._cast_floating_to(params, jnp.float16, mask) def init_weights(self, rng: jax.Array) -> Dict: raise NotImplementedError(f"init_weights method has to be implemented for {self}") @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype = jnp.float32, *model_args, **kwargs, ): r""" Instantiate a pretrained Flax model from a pretrained model configuration. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved using [`~FlaxModelMixin.save_pretrained`]. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given `dtype`. <Tip> This only specifies the dtype of the *computation* and does not influence the dtype of model parameters. If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and [`~FlaxModelMixin.to_bf16`]. </Tip> model_args (sequence of positional arguments, *optional*): All remaining positional arguments are passed to the underlying model's `__init__` method. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only(`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it is loaded) and initiate the model (for example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done). - If a configuration is not provided, `kwargs` are first passed to the configuration class initialization function [`~ConfigMixin.from_config`]. 
Each key of the `kwargs` that corresponds to a configuration attribute is used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute are passed to the underlying model's `__init__` function. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # Download model and configuration from huggingface.co and cache. >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/") ``` If you get the error message below, you need to finetune the weights for your downstream task: ```bash Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) from_pt = kwargs.pop("from_pt", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) user_agent = { "diffusers": __version__, "file_type": "model", "framework": "flax", } # Load config if we don't provide one if config is None: config, unused_kwargs = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, **kwargs, ) model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs) # Load model pretrained_path_with_subfolder = ( pretrained_model_name_or_path if subfolder is None else os.path.join(pretrained_model_name_or_path, subfolder) ) if os.path.isdir(pretrained_path_with_subfolder): if from_pt: if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} " ) model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)): # Load from a Flax checkpoint model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME) # Check if pytorch weights exist instead elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model" " using `from_pt=True`." ) else: raise EnvironmentError( f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_path_with_subfolder}." 
) else: try: model_file = hf_hub_download( pretrained_model_name_or_path, filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision, ) except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" f"{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your" " internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) if from_pt: if is_torch_available(): from .modeling_utils import load_state_dict else: raise EnvironmentError( "Can't load the model in PyTorch format because PyTorch is not installed. " "Please, install PyTorch or use native Flax weights." ) # Step 1: Get the pytorch file pytorch_model_file = load_state_dict(model_file) # Step 2: Convert the weights state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model) else: try: with open(model_file, "rb") as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: try: with open(model_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. 
") # make sure all arrays are stored as jnp.ndarray # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: # https://github.com/google/flax/issues/1261 state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state) # flatten dicts state = flatten_dict(state) params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0)) required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) shape_state = flatten_dict(unfreeze(params_shape_tree)) missing_keys = required_params - set(state.keys()) unexpected_keys = set(state.keys()) - required_params if missing_keys: logger.warning( f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. " "Make sure to call model.init_weights to initialize the missing weights." ) cls._missing_keys = missing_keys for key in state.keys(): if key in shape_state and state[key].shape != shape_state[key].shape: raise ValueError( f"Trying to load the pretrained weight for {key} failed: checkpoint has shape " f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. " ) # remove unexpected keys to not be saved again for unexpected_key in unexpected_keys: del state[unexpected_key] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) return model, unflatten_dict(state) def save_pretrained( self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict], is_main_process: bool = True, push_to_hub: bool = False, **kwargs, ): """ Save a model and its configuration file to a directory so that it can be reloaded using the [`~FlaxModelMixin.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to save a model and its configuration file to. Will be created if it doesn't exist. params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). 
kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", False) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id model_to_save = self # Attach architecture to the config # Save the config if is_main_process: model_to_save.save_config(save_directory) # save model output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) with open(output_model_file, "wb") as f: model_bytes = to_bytes(params) f.write(model_bytes) logger.info(f"Model weights saved in {output_model_file}") if push_to_hub: self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, )
diffusers/src/diffusers/models/modeling_flax_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "repo_id": "diffusers", "token_count": 11975 }
123
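To make the loading and casting utilities above concrete, here is a small usage sketch (not taken from the file): the checkpoint id, `subfolder`, and output directory are assumptions, and the `from_pt=True` path requires PyTorch to be installed so the weights can be converted as described in `from_pretrained`.

```python
# Minimal sketch of the loading / casting utilities, under the assumptions stated above.
from diffusers import FlaxUNet2DConditionModel

# Convert PyTorch weights on the fly (requires `torch` to be installed); a native
# Flax checkpoint in the repo would be loaded directly without `from_pt=True`.
model, params = FlaxUNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", from_pt=True
)

# Cast the parameter PyTree to bfloat16 (e.g. for TPU inference) ...
params = model.to_bf16(params)

# ... and back to full fp32 precision before saving.
params = model.to_fp32(params)
model.save_pretrained("./flax-unet-fp32", params=params)  # output directory is illustrative
```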
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, Optional import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..attention import BasicTransformerBlock, TemporalBasicTransformerBlock from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..resnet import AlphaBlender @dataclass class TransformerTemporalModelOutput(BaseOutput): """ The output of [`TransformerTemporalModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. """ sample: torch.FloatTensor class TransformerTemporalModel(ModelMixin, ConfigMixin): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. attention_bias (`bool`, *optional*): Configure if the `TransformerBlock` attention should contain a bias parameter. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for supported activation functions. norm_elementwise_affine (`bool`, *optional*): Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization. double_self_attention (`bool`, *optional*): Configure if each `TransformerBlock` should contain two self-attention layers. positional_embeddings: (`str`, *optional*): The type of positional embeddings to apply to the sequence input before passing use. num_positional_embeddings: (`int`, *optional*): The maximum length of the sequence over which to apply positional embeddings. 
""" @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, positional_embeddings=positional_embeddings, num_positional_embeddings=num_positional_embeddings, ) for d in range(num_layers) ] ) self.proj_out = nn.Linear(inner_dim, in_channels) def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.LongTensor] = None, timestep: Optional[torch.LongTensor] = None, class_labels: torch.LongTensor = None, num_frames: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> TransformerTemporalModelOutput: """ The [`TransformerTemporal`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): Input hidden_states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. num_frames (`int`, *optional*, defaults to 1): The number of frames to be processed per batch. This is used to reshape the hidden states. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. 
Input batch_frames, channel, height, width = hidden_states.shape batch_size = batch_frames // num_frames residual = hidden_states hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) hidden_states = self.proj_in(hidden_states) # 2. Blocks for block in self.transformer_blocks: hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output hidden_states = self.proj_out(hidden_states) hidden_states = ( hidden_states[None, None, :] .reshape(batch_size, height, width, num_frames, channel) .permute(0, 3, 4, 1, 2) .contiguous() ) hidden_states = hidden_states.reshape(batch_frames, channel, height, width) output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output) class TransformerSpatioTemporalModel(nn.Module): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). out_channels (`int`, *optional*): The number of channels in the output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. """ def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: int = 320, out_channels: Optional[int] = None, num_layers: int = 1, cross_attention_dim: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.inner_dim = inner_dim # 2. Define input layers self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim, ) for d in range(num_layers) ] ) time_mix_inner_dim = inner_dim self.temporal_transformer_blocks = nn.ModuleList( [ TemporalBasicTransformerBlock( inner_dim, time_mix_inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim, ) for _ in range(num_layers) ] ) time_embed_dim = in_channels * 4 self.time_pos_embed = TimestepEmbedding(in_channels, time_embed_dim, out_dim=in_channels) self.time_proj = Timesteps(in_channels, True, 0) self.time_mixer = AlphaBlender(alpha=0.5, merge_strategy="learned_with_images") # 4. 
Define output layers self.out_channels = in_channels if out_channels is None else out_channels # TODO: should use out_channels for continuous projections self.proj_out = nn.Linear(inner_dim, in_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, image_only_indicator: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input hidden_states. num_frames (`int`): The number of frames to be processed per batch. This is used to reshape the hidden states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. image_only_indicator (`torch.LongTensor` of shape `(batch size, num_frames)`, *optional*): A tensor indicating whether the input contains only images. 1 indicates that the input contains only images, 0 indicates that the input contains video frames. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain tuple. Returns: [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. Input batch_frames, _, height, width = hidden_states.shape num_frames = image_only_indicator.shape[-1] batch_size = batch_frames // num_frames time_context = encoder_hidden_states time_context_first_timestep = time_context[None, :].reshape( batch_size, num_frames, -1, time_context.shape[-1] )[:, 0] time_context = time_context_first_timestep[None, :].broadcast_to( height * width, batch_size, 1, time_context.shape[-1] ) time_context = time_context.reshape(height * width * batch_size, 1, time_context.shape[-1]) residual = hidden_states hidden_states = self.norm(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_frames, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) num_frames_emb = torch.arange(num_frames, device=hidden_states.device) num_frames_emb = num_frames_emb.repeat(batch_size, 1) num_frames_emb = num_frames_emb.reshape(-1) t_emb = self.time_proj(num_frames_emb) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_pos_embed(t_emb) emb = emb[:, None, :] # 2. Blocks for block, temporal_block in zip(self.transformer_blocks, self.temporal_transformer_blocks): if self.training and self.gradient_checkpointing: hidden_states = torch.utils.checkpoint.checkpoint( block, hidden_states, None, encoder_hidden_states, None, use_reentrant=False, ) else: hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, ) hidden_states_mix = hidden_states hidden_states_mix = hidden_states_mix + emb hidden_states_mix = temporal_block( hidden_states_mix, num_frames=num_frames, encoder_hidden_states=time_context, ) hidden_states = self.time_mixer( x_spatial=hidden_states, x_temporal=hidden_states_mix, image_only_indicator=image_only_indicator, ) # 3. 
Output hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch_frames, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output)
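# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, self-contained forward pass through TransformerTemporalModel; all shapes below are
# assumptions chosen only for demonstration. The model expects its input flattened to
# (batch * num_frames, channels, height, width) and internally regroups it into
# (batch * height * width, num_frames, channels) so attention runs along the frame axis;
# thanks to the residual connection, the output keeps the input shape.
import torch

from diffusers.models.transformers.transformer_temporal import TransformerTemporalModel

model = TransformerTemporalModel(
    num_attention_heads=8,
    attention_head_dim=4,  # inner_dim = 8 * 4 = 32
    in_channels=32,        # must be divisible by norm_num_groups (32 by default)
    num_layers=1,
)
batch_size, num_frames, height, width = 2, 4, 8, 8
hidden_states = torch.randn(batch_size * num_frames, 32, height, width)
output = model(hidden_states, num_frames=num_frames).sample
print(output.shape)  # torch.Size([8, 32, 8, 8]) -- same shape as the input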
diffusers/src/diffusers/models/transformers/transformer_temporal.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_temporal.py", "repo_id": "diffusers", "token_count": 7286 }
124
# Copyright 2024 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import logging from ..activations import get_activation from ..attention import Attention, FeedForward from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..transformers.transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from .unet_3d_condition import UNet3DConditionOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name class I2VGenXLTransformerTemporalEncoder(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, activation_fn: str = "geglu", upcast_attention: bool = False, ff_inner_dim: Optional[int] = None, dropout: int = 0.0, ): super().__init__() self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=1e-5) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=False, upcast_attention=upcast_attention, out_bias=True, ) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=False, inner_dim=ff_inner_dim, bias=True, ) def forward( self, hidden_states: torch.FloatTensor, ) -> torch.FloatTensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) ff_output = self.ff(hidden_states) hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" I2VGenXL UNet. It is a conditional 3D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample-shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. 
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 64): Attention head dim. num_attention_heads (`int`, *optional*): The number of attention heads. """ _supports_gradient_checkpointing = False @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), up_block_types: Tuple[str, ...] = ( "UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", ), block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, norm_num_groups: Optional[int] = 32, cross_attention_dim: int = 1024, attention_head_dim: Union[int, Tuple[int]] = 64, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, ): super().__init__() # When we first integrated the UNet into the library, we didn't have `attention_head_dim`. As a consequence # of that, we used `num_attention_heads` for arguments that actually denote attention head dimension. This # is why we ignore `num_attention_heads` and calculate it from `attention_head_dims` below. # This is still an incorrect way of calculating `num_attention_heads` but we need to stick to it # without running proper depcrecation cycles for the {down,mid,up} blocks which are a # part of the public API. num_attention_heads = attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d(in_channels + in_channels, block_out_channels[0], kernel_size=3, padding=1) self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=num_attention_heads, in_channels=block_out_channels[0], num_layers=1, norm_num_groups=norm_num_groups, ) # image embedding self.image_latents_proj_in = nn.Sequential( nn.Conv2d(4, in_channels * 4, 3, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 4, in_channels * 4, 3, stride=1, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 4, in_channels, 3, stride=1, padding=1), ) self.image_latents_temporal_encoder = I2VGenXLTransformerTemporalEncoder( dim=in_channels, num_attention_heads=2, ff_inner_dim=in_channels * 4, attention_head_dim=in_channels, activation_fn="gelu", ) self.image_latents_context_embedding = nn.Sequential( nn.Conv2d(4, in_channels * 8, 3, padding=1), nn.SiLU(), nn.AdaptiveAvgPool2d((32, 32)), nn.Conv2d(in_channels * 8, in_channels * 16, 3, stride=2, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 16, cross_attention_dim, 3, stride=2, padding=1), ) # other embeddings -- time, context, fps, etc. time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn="silu") self.context_embedding = nn.Sequential( nn.Linear(cross_attention_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, cross_attention_dim * in_channels), ) self.fps_embedding = nn.Sequential( nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim) ) # blocks self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-05, resnet_act_fn="silu", resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], downsample_padding=1, dual_cross_attention=False, ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlock3DCrossAttn( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=1e-05, resnet_act_fn="silu", output_scale_factor=1, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, 
num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-05, resnet_act_fn="silu", resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=False, resolution_idx=i, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-05) self.conv_act = get_activation("silu") self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Sets the attention processor to use [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually over each tensor of dim=`dim`. 
dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). """ if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel._set_gradient_checkpointing def _set_gradient_checkpointing(self, module, value: bool = False) -> None: if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): module.gradient_checkpointing = value # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu def enable_freeu(self, s1, s2, b1, b2): r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stage blocks where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
""" for i, upsample_block in enumerate(self.up_blocks): setattr(upsample_block, "s1", s1) setattr(upsample_block, "s2", s2) setattr(upsample_block, "b1", b1) setattr(upsample_block, "b2", b2) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu def disable_freeu(self): """Disables the FreeU mechanism.""" freeu_keys = {"s1", "s2", "b1", "b2"} for i, upsample_block in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], fps: torch.Tensor, image_latents: torch.Tensor, image_embeddings: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[UNet3DConditionOutput, Tuple[torch.FloatTensor]]: r""" The [`I2VGenXLUNet`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, num_frames, channel, height, width`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. fps (`torch.Tensor`): Frames per second for the video being generated. Used as a "micro-condition". image_latents (`torch.FloatTensor`): Image encodings from the VAE. image_embeddings (`torch.FloatTensor`): Projection embeddings of the conditioning image computed with a vision encoder. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain tuple. Returns: [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. 
""" batch_size, channels, num_frames, height, width = sample.shape # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass `timesteps` as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" if isinstance(timesteps, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=self.dtype) t_emb = self.time_embedding(t_emb, timestep_cond) # 2. FPS # broadcast to batch dimension in a way that's compatible with ONNX/Core ML fps = fps.expand(fps.shape[0]) fps_emb = self.fps_embedding(self.time_proj(fps).to(dtype=self.dtype)) # 3. time + FPS embeddings. emb = t_emb + fps_emb emb = emb.repeat_interleave(repeats=num_frames, dim=0) # 4. context embeddings. # The context embeddings consist of both text embeddings from the input prompt # AND the image embeddings from the input image. For images, both VAE encodings # and the CLIP image embeddings are incorporated. # So the final `context_embeddings` becomes the query for cross-attention. 
context_emb = sample.new_zeros(batch_size, 0, self.config.cross_attention_dim) context_emb = torch.cat([context_emb, encoder_hidden_states], dim=1) image_latents_for_context_embds = image_latents[:, :, :1, :] image_latents_context_embs = image_latents_for_context_embds.permute(0, 2, 1, 3, 4).reshape( image_latents_for_context_embds.shape[0] * image_latents_for_context_embds.shape[2], image_latents_for_context_embds.shape[1], image_latents_for_context_embds.shape[3], image_latents_for_context_embds.shape[4], ) image_latents_context_embs = self.image_latents_context_embedding(image_latents_context_embs) _batch_size, _channels, _height, _width = image_latents_context_embs.shape image_latents_context_embs = image_latents_context_embs.permute(0, 2, 3, 1).reshape( _batch_size, _height * _width, _channels ) context_emb = torch.cat([context_emb, image_latents_context_embs], dim=1) image_emb = self.context_embedding(image_embeddings) image_emb = image_emb.view(-1, self.config.in_channels, self.config.cross_attention_dim) context_emb = torch.cat([context_emb, image_emb], dim=1) context_emb = context_emb.repeat_interleave(repeats=num_frames, dim=0) image_latents = image_latents.permute(0, 2, 1, 3, 4).reshape( image_latents.shape[0] * image_latents.shape[2], image_latents.shape[1], image_latents.shape[3], image_latents.shape[4], ) image_latents = self.image_latents_proj_in(image_latents) image_latents = ( image_latents[None, :] .reshape(batch_size, num_frames, channels, height, width) .permute(0, 3, 4, 1, 2) .reshape(batch_size * height * width, num_frames, channels) ) image_latents = self.image_latents_temporal_encoder(image_latents) image_latents = image_latents.reshape(batch_size, height, width, num_frames, channels).permute(0, 4, 3, 1, 2) # 5. pre-process sample = torch.cat([sample, image_latents], dim=1) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) sample = self.transformer_in( sample, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # 6. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=context_emb, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples # 7. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=context_emb, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) # 8. 
up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=context_emb, upsample_size=upsample_size, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, num_frames=num_frames, ) # 9. post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) # reshape to (batch, channel, framerate, width, height) sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) if not return_dict: return (sample,) return UNet3DConditionOutput(sample=sample)
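# --- Illustrative usage sketch (not part of the original file) ---
# In practice this UNet is rarely built by hand; it is loaded as the `unet` component of
# `I2VGenXLPipeline`. This is a hedged sketch: the checkpoint id "ali-vilab/i2vgen-xl", the
# `variant="fp16"` flag, and the image URL are assumptions and may need to be adapted.
import torch

from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image

pipeline = I2VGenXLPipeline.from_pretrained(
    "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
)
pipeline.enable_model_cpu_offload()

image = load_image("https://example.com/library.png").convert("RGB")  # placeholder URL
generator = torch.manual_seed(8888)
frames = pipeline(
    prompt="Papers were floating in the air on a table in the library",
    image=image,
    num_inference_steps=50,
    guidance_scale=9.0,
    generator=generator,
).frames[0]
export_to_gif(frames, "i2v.gif")
# Internally, `pipeline.unet` is the I2VGenXLUNet defined above: `sample` carries the noisy video
# latents (batch, channels, num_frames, height, width), `image_latents` the VAE encoding of the
# conditioning image, `image_embeddings` its projected vision-encoder features, and `fps` acts as
# the micro-condition described in the forward docstring.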
diffusers/src/diffusers/models/unets/unet_i2vgen_xl.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_i2vgen_xl.py", "repo_id": "diffusers", "token_count": 14630 }
125
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {"pipeline_output": ["AnimateDiffPipelineOutput"]} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_animatediff"] = ["AnimateDiffPipeline"] _import_structure["pipeline_animatediff_video2video"] = ["AnimateDiffVideoToVideoPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_animatediff import AnimateDiffPipeline from .pipeline_animatediff_video2video import AnimateDiffVideoToVideoPipeline from .pipeline_output import AnimateDiffPipelineOutput else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
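# --- Illustrative note (not part of the original file) ---
# The `_LazyModule` registration above means the heavy pipeline modules are only imported on
# first attribute access. A minimal sketch, assuming torch and transformers are installed:
from diffusers.pipelines.animatediff import AnimateDiffPipeline  # triggers the real import here

print(AnimateDiffPipeline.__module__)
# -> "diffusers.pipelines.animatediff.pipeline_animatediff"
# If either dependency were missing, the corresponding dummy object from
# `dummy_torch_and_transformers_objects` would be exposed instead and raise an informative
# error only when the class is actually used.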
diffusers/src/diffusers/pipelines/animatediff/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/animatediff/__init__.py", "repo_id": "diffusers", "token_count": 612 }
126
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import torch from ...models import UNet2DModel from ...schedulers import CMStochasticIterativeScheduler from ...utils import ( logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ConsistencyModelPipeline >>> device = "cuda" >>> # Load the cd_imagenet64_l2 checkpoint. >>> model_id_or_path = "openai/diffusers-cd_imagenet64_l2" >>> pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) >>> pipe.to(device) >>> # Onestep Sampling >>> image = pipe(num_inference_steps=1).images[0] >>> image.save("cd_imagenet64_l2_onestep_sample.png") >>> # Onestep sampling, class-conditional image generation >>> # ImageNet-64 class label 145 corresponds to king penguins >>> image = pipe(num_inference_steps=1, class_labels=145).images[0] >>> image.save("cd_imagenet64_l2_onestep_sample_penguin.png") >>> # Multistep sampling, class-conditional image generation >>> # Timesteps can be explicitly specified; the particular timesteps below are from the original Github repo: >>> # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L77 >>> image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=145).images[0] >>> image.save("cd_imagenet64_l2_multistep_sample_penguin.png") ``` """ class ConsistencyModelPipeline(DiffusionPipeline): r""" Pipeline for unconditional or class-conditional image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only compatible with [`CMStochasticIterativeScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: super().__init__() self.register_modules( unet=unet, scheduler=scheduler, ) self.safety_checker = None def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Follows diffusers.VaeImageProcessor.postprocess def postprocess_image(self, sample: torch.FloatTensor, output_type: str = "pil"): if output_type not in ["pt", "np", "pil"]: raise ValueError( f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" ) # Equivalent to diffusers.VaeImageProcessor.denormalize sample = (sample / 2 + 0.5).clamp(0, 1) if output_type == "pt": return sample # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == "np": return sample # Output_type must be 'pil' sample = self.numpy_to_pil(sample) return sample def prepare_class_labels(self, batch_size, device, class_labels=None): if self.unet.config.num_class_embeds is not None: if isinstance(class_labels, list): class_labels = torch.tensor(class_labels, dtype=torch.int) elif isinstance(class_labels, int): assert batch_size == 1, "Batch size must be 1 if classes is an int" class_labels = torch.tensor([class_labels], dtype=torch.int) elif class_labels is None: # Randomly generate batch_size class labels # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) class_labels = class_labels.to(device) else: class_labels = None return class_labels def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): if num_inference_steps is None and timesteps is None: raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") if num_inference_steps is not None and timesteps is not None: logger.warning( f"Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied;" " `timesteps` will be used over `num_inference_steps`." ) if latents is not None: expected_shape = (batch_size, 3, img_size, img_size) if latents.shape != expected_shape: raise ValueError(f"The shape of latents is {latents.shape} but is expected to be {expected_shape}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, batch_size: int = 1, class_labels: Optional[Union[torch.Tensor, List[int], int]] = None, num_inference_steps: int = 1, timesteps: List[int] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ): r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. class_labels (`torch.Tensor` or `List[int]` or `int`, *optional*): Optional class labels for conditioning class-conditional consistency models. Not used if the model is not class-conditional. num_inference_steps (`int`, *optional*, defaults to 1): The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Prepare call parameters img_size = self.unet.config.sample_size device = self._execution_device # 1. Check inputs self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) # 2. Prepare image latents # Sample image latents x_0 ~ N(0, sigma_0^2 * I) sample = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=img_size, width=img_size, dtype=self.unet.dtype, device=device, generator=generator, latents=latents, ) # 3. Handle class_labels for class-conditional models class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) # 4. Prepare timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps # 5. Denoising loop # Multistep sampling: implements Algorithm 1 in the paper with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): scaled_sample = self.scheduler.scale_model_input(sample, t) model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] sample = self.scheduler.step(model_output, t, sample, generator=generator)[0] # call the callback, if provided progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, sample) # 6. Post-process image sample image = self.postprocess_image(sample, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
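# --- Illustrative usage sketch (not part of the original file) ---
# Complementing EXAMPLE_DOC_STRING above: a hedged sketch of reproducible, class-conditional
# batched sampling. Passing one generator per sample exercises the per-sample branch of
# `prepare_latents` (via `randn_tensor`), so each image in the batch is individually seeded.
# The checkpoint id and the class label 145 (king penguin) are taken from the docstring and are
# assumed to be available.
import torch

from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained(
    "openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16
).to("cuda")

seeds = [0, 1, 2, 3]
generators = [torch.Generator(device="cuda").manual_seed(s) for s in seeds]
images = pipe(
    batch_size=4,
    class_labels=[145, 145, 145, 145],  # king penguins, as in the docstring example
    num_inference_steps=1,              # one-step sampling
    generator=generators,
).images
for seed, image in zip(seeds, images):
    image.save(f"cd_imagenet64_l2_seed{seed}.png")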
diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py", "repo_id": "diffusers", "token_count": 5192 }
127
# Copyright 2022 The Music Spectrogram Diffusion Authors. # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import math import os from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn.functional as F from ....utils import is_note_seq_available from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH if is_note_seq_available(): import note_seq else: raise ImportError("Please install note-seq via `pip install note-seq`") INPUT_FEATURE_LENGTH = 2048 SAMPLE_RATE = 16000 HOP_SIZE = 320 FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE) DEFAULT_STEPS_PER_SECOND = 100 DEFAULT_MAX_SHIFT_SECONDS = 10 DEFAULT_NUM_VELOCITY_BINS = 1 SLAKH_CLASS_PROGRAMS = { "Acoustic Piano": 0, "Electric Piano": 4, "Chromatic Percussion": 8, "Organ": 16, "Acoustic Guitar": 24, "Clean Electric Guitar": 26, "Distorted Electric Guitar": 29, "Acoustic Bass": 32, "Electric Bass": 33, "Violin": 40, "Viola": 41, "Cello": 42, "Contrabass": 43, "Orchestral Harp": 46, "Timpani": 47, "String Ensemble": 48, "Synth Strings": 50, "Choir and Voice": 52, "Orchestral Hit": 55, "Trumpet": 56, "Trombone": 57, "Tuba": 58, "French Horn": 60, "Brass Section": 61, "Soprano/Alto Sax": 64, "Tenor Sax": 66, "Baritone Sax": 67, "Oboe": 68, "English Horn": 69, "Bassoon": 70, "Clarinet": 71, "Pipe": 73, "Synth Lead": 80, "Synth Pad": 88, } @dataclasses.dataclass class NoteRepresentationConfig: """Configuration note representations.""" onsets_only: bool include_ties: bool @dataclasses.dataclass class NoteEventData: pitch: int velocity: Optional[int] = None program: Optional[int] = None is_drum: Optional[bool] = None instrument: Optional[int] = None @dataclasses.dataclass class NoteEncodingState: """Encoding state for note transcription, keeping track of active pitches.""" # velocity bin for active pitches and programs active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) @dataclasses.dataclass class EventRange: type: str min_value: int max_value: int @dataclasses.dataclass class Event: type: str value: int class Tokenizer: def __init__(self, regular_ids: int): # The special tokens: 0=PAD, 1=EOS, and 2=UNK self._num_special_tokens = 3 self._num_regular_tokens = regular_ids def encode(self, token_ids): encoded = [] for token_id in token_ids: if not 0 <= token_id < self._num_regular_tokens: raise ValueError( f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})" ) encoded.append(token_id + self._num_special_tokens) # Add EOS token encoded.append(1) # Pad to till INPUT_FEATURE_LENGTH encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded)) return encoded class Codec: """Encode and decode events. Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from Python before encoding or after decoding with GenericTokenVocabulary. 
This class is more lightweight and does not include things like EOS or UNK token handling. To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required and specified separately. """ def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]): """Define Codec. Args: max_shift_steps: Maximum number of shift steps that can be encoded. steps_per_second: Shift steps will be interpreted as having a duration of 1 / steps_per_second. event_ranges: Other supported event types and their ranges. """ self.steps_per_second = steps_per_second self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps) self._event_ranges = [self._shift_range] + event_ranges # Ensure all event types have unique names. assert len(self._event_ranges) == len({er.type for er in self._event_ranges}) @property def num_classes(self) -> int: return sum(er.max_value - er.min_value + 1 for er in self._event_ranges) # The next couple methods are simplified special case methods just for shift # events that are intended to be used from within autograph functions. def is_shift_event_index(self, index: int) -> bool: return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value) @property def max_shift_steps(self) -> int: return self._shift_range.max_value def encode_event(self, event: Event) -> int: """Encode an event to an index.""" offset = 0 for er in self._event_ranges: if event.type == er.type: if not er.min_value <= event.value <= er.max_value: raise ValueError( f"Event value {event.value} is not within valid range " f"[{er.min_value}, {er.max_value}] for type {event.type}" ) return offset + event.value - er.min_value offset += er.max_value - er.min_value + 1 raise ValueError(f"Unknown event type: {event.type}") def event_type_range(self, event_type: str) -> Tuple[int, int]: """Return [min_id, max_id] for an event type.""" offset = 0 for er in self._event_ranges: if event_type == er.type: return offset, offset + (er.max_value - er.min_value) offset += er.max_value - er.min_value + 1 raise ValueError(f"Unknown event type: {event_type}") def decode_event_index(self, index: int) -> Event: """Decode an event index to an Event.""" offset = 0 for er in self._event_ranges: if offset <= index <= offset + er.max_value - er.min_value: return Event(type=er.type, value=er.min_value + index - offset) offset += er.max_value - er.min_value + 1 raise ValueError(f"Unknown event index: {index}") @dataclasses.dataclass class ProgramGranularity: # both tokens_map_fn and program_map_fn should be idempotent tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]] program_map_fn: Callable[[int], int] def drop_programs(tokens, codec: Codec): """Drops program change events from a token sequence.""" min_program_id, max_program_id = codec.event_type_range("program") return tokens[(tokens < min_program_id) | (tokens > max_program_id)] def programs_to_midi_classes(tokens, codec): """Modifies program events to be the first program in the MIDI class.""" min_program_id, max_program_id = codec.event_type_range("program") is_program = (tokens >= min_program_id) & (tokens <= max_program_id) return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens) PROGRAM_GRANULARITIES = { # "flat" granularity; drop program change tokens and set NoteSequence # programs to zero "flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0), # map each program to the first 
program in its MIDI class "midi_class": ProgramGranularity( tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8) ), # leave programs as is "full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, program_map_fn=lambda program: program), } def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1): """ equivalent of tf.signal.frame """ signal_length = signal.shape[axis] if pad_end: frames_overlap = frame_length - frame_step rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap) pad_size = int(frame_length - rest_samples) if pad_size != 0: pad_axis = [0] * signal.ndim pad_axis[axis] = pad_size signal = F.pad(signal, pad_axis, "constant", pad_value) frames = signal.unfold(axis, frame_length, frame_step) return frames def program_to_slakh_program(program): # this is done very hackily, probably should use a custom mapping for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True): if program >= slakh_program: return slakh_program def audio_to_frames( samples, hop_size: int, frame_rate: int, ) -> Tuple[Sequence[Sequence[int]], torch.Tensor]: """Convert audio samples to non-overlapping frames and frame times.""" frame_size = hop_size samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant") # Split audio into frames. frames = frame( torch.Tensor(samples).unsqueeze(0), frame_length=frame_size, frame_step=frame_size, pad_end=False, # TODO check why its off by 1 here when True ) num_frames = len(samples) // frame_size times = np.arange(num_frames) / frame_rate return frames, times def note_sequence_to_onsets_and_offsets_and_programs( ns: note_seq.NoteSequence, ) -> Tuple[Sequence[float], Sequence[NoteEventData]]: """Extract onset & offset times and pitches & programs from a NoteSequence. The onset & offset times will not necessarily be in sorted order. Args: ns: NoteSequence from which to extract onsets and offsets. Returns: times: A list of note onset and offset times. values: A list of NoteEventData objects where velocity is zero for note offsets. """ # Sort by program and pitch and put offsets before onsets as a tiebreaker for # subsequent stable sort. 
notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch)) times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes] values = [ NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) for note in notes if not note.is_drum ] + [ NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) for note in notes ] return times, values def num_velocity_bins_from_codec(codec: Codec): """Get number of velocity bins from event codec.""" lo, hi = codec.event_type_range("velocity") return hi - lo # segment an array into segments of length n def segment(a, n): return [a[i : i + n] for i in range(0, len(a), n)] def velocity_to_bin(velocity, num_velocity_bins): if velocity == 0: return 0 else: return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY) def note_event_data_to_events( state: Optional[NoteEncodingState], value: NoteEventData, codec: Codec, ) -> Sequence[Event]: """Convert note event data to a sequence of events.""" if value.velocity is None: # onsets only, no program or velocity return [Event("pitch", value.pitch)] else: num_velocity_bins = num_velocity_bins_from_codec(codec) velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins) if value.program is None: # onsets + offsets + velocities only, no programs if state is not None: state.active_pitches[(value.pitch, 0)] = velocity_bin return [Event("velocity", velocity_bin), Event("pitch", value.pitch)] else: if value.is_drum: # drum events use a separate vocabulary return [Event("velocity", velocity_bin), Event("drum", value.pitch)] else: # program + velocity + pitch if state is not None: state.active_pitches[(value.pitch, value.program)] = velocity_bin return [ Event("program", value.program), Event("velocity", velocity_bin), Event("pitch", value.pitch), ] def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]: """Output program and pitch events for active notes plus a final tie event.""" events = [] for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]): if state.active_pitches[(pitch, program)]: events += [Event("program", program), Event("pitch", pitch)] events.append(Event("tie", 0)) return events def encode_and_index_events( state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None ): """Encode a sequence of timed events and index to audio frame times. Encodes time shifts as repeated single step shifts for later run length encoding. Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio frame. This can be used e.g. to prepend events representing the current state to a targets segment. Args: state: Initial event encoding state. event_times: Sequence of event times. event_values: Sequence of event values. encode_event_fn: Function that transforms event value into a sequence of one or more Event objects. codec: An Codec object that maps Event objects to indices. frame_times: Time for every audio frame. encoding_state_to_events_fn: Function that transforms encoding state into a sequence of one or more Event objects. Returns: events: Encoded events and shifts. event_start_indices: Corresponding start event index for every audio frame. Note: one event can correspond to multiple audio indices due to sampling rate differences. 
This makes splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of another. event_end_indices: Corresponding end event index for every audio frame. Used to ensure when slicing that one chunk ends where the next begins. Should always be true that event_end_indices[i] = event_start_indices[i + 1]. state_events: Encoded "state" events representing the encoding state before each event. state_event_indices: Corresponding state event index for every audio frame. """ indices = np.argsort(event_times, kind="stable") event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices] event_values = [event_values[i] for i in indices] events = [] state_events = [] event_start_indices = [] state_event_indices = [] cur_step = 0 cur_event_idx = 0 cur_state_event_idx = 0 def fill_event_start_indices_to_cur_step(): while ( len(event_start_indices) < len(frame_times) and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second ): event_start_indices.append(cur_event_idx) state_event_indices.append(cur_state_event_idx) for event_step, event_value in zip(event_steps, event_values): while event_step > cur_step: events.append(codec.encode_event(Event(type="shift", value=1))) cur_step += 1 fill_event_start_indices_to_cur_step() cur_event_idx = len(events) cur_state_event_idx = len(state_events) if encoding_state_to_events_fn: # Dump state to state events *before* processing the next event, because # we want to capture the state prior to the occurrence of the event. for e in encoding_state_to_events_fn(state): state_events.append(codec.encode_event(e)) for e in encode_event_fn(state, event_value, codec): events.append(codec.encode_event(e)) # After the last event, continue filling out the event_start_indices array. # The inequality is not strict because if our current step lines up exactly # with (the start of) an audio frame, we need to add an additional shift event # to "cover" that frame. while cur_step / codec.steps_per_second <= frame_times[-1]: events.append(codec.encode_event(Event(type="shift", value=1))) cur_step += 1 fill_event_start_indices_to_cur_step() cur_event_idx = len(events) # Now fill in event_end_indices. We need this extra array to make sure that # when we slice events, each slice ends exactly where the subsequent slice # begins. 
event_end_indices = event_start_indices[1:] + [len(events)] events = np.array(events).astype(np.int32) state_events = np.array(state_events).astype(np.int32) event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH) event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH) state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH) outputs = [] for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices): outputs.append( { "inputs": events, "event_start_indices": start_indices, "event_end_indices": end_indices, "state_events": state_events, "state_event_indices": event_indices, } ) return outputs def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"): """Extract target sequence corresponding to audio token segment.""" features = features.copy() start_idx = features["event_start_indices"][0] end_idx = features["event_end_indices"][-1] features[feature_key] = features[feature_key][start_idx:end_idx] if state_events_end_token is not None: # Extract the state events corresponding to the audio start token, and # prepend them to the targets array. state_event_start_idx = features["state_event_indices"][0] state_event_end_idx = state_event_start_idx + 1 while features["state_events"][state_event_end_idx - 1] != state_events_end_token: state_event_end_idx += 1 features[feature_key] = np.concatenate( [ features["state_events"][state_event_start_idx:state_event_end_idx], features[feature_key], ], axis=0, ) return features def map_midi_programs( feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs" ) -> Mapping[str, Any]: """Apply MIDI program map to token sequences.""" granularity = PROGRAM_GRANULARITIES[granularity_type] feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec) return feature def run_length_encode_shifts_fn( features, codec: Codec, feature_key: str = "inputs", state_change_event_types: Sequence[str] = (), ) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]: """Return a function that run-length encodes shifts for a given codec. Args: codec: The Codec to use for shift events. feature_key: The feature key for which to run-length encode shifts. state_change_event_types: A list of event types that represent state changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones will be removed. Returns: A preprocessing function that run-length encodes single-step shifts. """ state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types] def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]: """Combine leading/interior shifts, trim trailing shifts. Args: features: Dict of features to process. Returns: A dict of features. """ events = features[feature_key] shift_steps = 0 total_shift_steps = 0 output = np.array([], dtype=np.int32) current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32) for event in events: if codec.is_shift_event_index(event): shift_steps += 1 total_shift_steps += 1 else: # If this event is a state change and has the same value as the current # state, we can skip it entirely. 
is_redundant = False for i, (min_index, max_index) in enumerate(state_change_event_ranges): if (min_index <= event) and (event <= max_index): if current_state[i] == event: is_redundant = True current_state[i] = event if is_redundant: continue # Once we've reached a non-shift event, RLE all previous shift events # before outputting the non-shift event. if shift_steps > 0: shift_steps = total_shift_steps while shift_steps > 0: output_steps = np.minimum(codec.max_shift_steps, shift_steps) output = np.concatenate([output, [output_steps]], axis=0) shift_steps -= output_steps output = np.concatenate([output, [event]], axis=0) features[feature_key] = output return features return run_length_encode_shifts(features) def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig): tie_token = codec.encode_event(Event("tie", 0)) state_events_end_token = tie_token if note_representation_config.include_ties else None features = extract_sequence_with_indices( features, state_events_end_token=state_events_end_token, feature_key="inputs" ) features = map_midi_programs(features, codec) features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"]) return features class MidiProcessor: def __init__(self): self.codec = Codec( max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND, steps_per_second=DEFAULT_STEPS_PER_SECOND, event_ranges=[ EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS), EventRange("tie", 0, 0), EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM), EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), ], ) self.tokenizer = Tokenizer(self.codec.num_classes) self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True) def __call__(self, midi: Union[bytes, os.PathLike, str]): if not isinstance(midi, bytes): with open(midi, "rb") as f: midi = f.read() ns = note_seq.midi_to_note_sequence(midi) ns_sus = note_seq.apply_sustain_control_changes(ns) for note in ns_sus.notes: if not note.is_drum: note.program = program_to_slakh_program(note.program) samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE)) _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE) times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus) events = encode_and_index_events( state=NoteEncodingState(), event_times=times, event_values=values, frame_times=frame_times, codec=self.codec, encode_event_fn=note_event_data_to_events, encoding_state_to_events_fn=note_encoding_state_to_events, ) events = [ note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events ] input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events] return input_tokens
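

# Minimal usage sketch: one plausible way to drive `MidiProcessor` end to end. The MIDI
# path below is a placeholder, and only the classes and helpers defined above are assumed.
if __name__ == "__main__":
    processor = MidiProcessor()
    # Returns one token list per fixed-length audio segment of the MIDI's time grid.
    token_segments = processor("example.mid")  # placeholder path
    print(f"{len(token_segments)} segments, {len(token_segments[0])} tokens in the first segment")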
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py", "repo_id": "diffusers", "token_count": 10185 }
128
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_kandinsky2_2"] = ["KandinskyV22Pipeline"] _import_structure["pipeline_kandinsky2_2_combined"] = [ "KandinskyV22CombinedPipeline", "KandinskyV22Img2ImgCombinedPipeline", "KandinskyV22InpaintCombinedPipeline", ] _import_structure["pipeline_kandinsky2_2_controlnet"] = ["KandinskyV22ControlnetPipeline"] _import_structure["pipeline_kandinsky2_2_controlnet_img2img"] = ["KandinskyV22ControlnetImg2ImgPipeline"] _import_structure["pipeline_kandinsky2_2_img2img"] = ["KandinskyV22Img2ImgPipeline"] _import_structure["pipeline_kandinsky2_2_inpainting"] = ["KandinskyV22InpaintPipeline"] _import_structure["pipeline_kandinsky2_2_prior"] = ["KandinskyV22PriorPipeline"] _import_structure["pipeline_kandinsky2_2_prior_emb2emb"] = ["KandinskyV22PriorEmb2EmbPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_kandinsky2_2 import KandinskyV22Pipeline from .pipeline_kandinsky2_2_combined import ( KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22InpaintCombinedPipeline, ) from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/kandinsky2_2/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky2_2/__init__.py", "repo_id": "diffusers", "token_count": 1190 }
129
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import re import warnings from pathlib import Path from typing import Any, Dict, List, Optional, Union import torch from huggingface_hub import ( model_info, ) from packaging import version from ..utils import ( SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, get_class_from_dynamic_module, is_peft_available, is_transformers_available, logging, ) from ..utils.torch_utils import is_compiled_module if is_transformers_available(): import transformers from transformers import PreTrainedModel from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME from huggingface_hub.utils import validate_hf_hub_args from ..utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME INDEX_FILE = "diffusion_pytorch_model.bin" CUSTOM_PIPELINE_FILE_NAME = "pipeline.py" DUMMY_MODULES_FOLDER = "diffusers.utils" TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils" CONNECTED_PIPES_KEYS = ["prior"] logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "ModelMixin": ["save_pretrained", "from_pretrained"], "SchedulerMixin": ["save_pretrained", "from_pretrained"], "DiffusionPipeline": ["save_pretrained", "from_pretrained"], "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "PreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, "onnxruntime.training": { "ORTModule": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def is_safetensors_compatible(filenames, variant=None, passed_components=None) -> bool: """ Checking for safetensors compatibility: - By default, all models are saved with the default pytorch serialization, so we use the list of default pytorch files to know which safetensors files are needed. - The model is safetensors compatible only if there is a matching safetensors file for every default pytorch file. 
Converting default pytorch serialized filenames to safetensors serialized filenames: - For models from the diffusers library, just replace the ".bin" extension with ".safetensors" - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin" extension is replaced with ".safetensors" """ pt_filenames = [] sf_filenames = set() passed_components = passed_components or [] for filename in filenames: _, extension = os.path.splitext(filename) if len(filename.split("/")) == 2 and filename.split("/")[0] in passed_components: continue if extension == ".bin": pt_filenames.append(os.path.normpath(filename)) elif extension == ".safetensors": sf_filenames.add(os.path.normpath(filename)) for filename in pt_filenames: # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam' path, filename = os.path.split(filename) filename, extension = os.path.splitext(filename) if filename.startswith("pytorch_model"): filename = filename.replace("pytorch_model", "model") else: filename = filename expected_sf_filename = os.path.normpath(os.path.join(path, filename)) expected_sf_filename = f"{expected_sf_filename}.safetensors" if expected_sf_filename not in sf_filenames: logger.warning(f"{expected_sf_filename} not found") return False return True def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]: weight_names = [ WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME, FLAX_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ] if is_transformers_available(): weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] # model_pytorch, diffusion_model_pytorch, ... weight_prefixes = [w.split(".")[0] for w in weight_names] # .bin, .safetensors, ... 
weight_suffixs = [w.split(".")[-1] for w in weight_names] # -00001-of-00002 transformers_index_format = r"\d{5}-of-\d{5}" if variant is not None: # `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors` variant_file_re = re.compile( rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$" ) # `text_encoder/pytorch_model.bin.index.fp16.json` variant_index_re = re.compile( rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$" ) # `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors` non_variant_file_re = re.compile( rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$" ) # `text_encoder/pytorch_model.bin.index.json` non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json") if variant is not None: variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None} variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None} variant_filenames = variant_weights | variant_indexes else: variant_filenames = set() non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None} non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None} non_variant_filenames = non_variant_weights | non_variant_indexes # all variant filenames will be used by default usable_filenames = set(variant_filenames) def convert_to_variant(filename): if "index" in filename: variant_filename = filename.replace("index", f"index.{variant}") elif re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None: variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}" else: variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}" return variant_filename for f in non_variant_filenames: variant_filename = convert_to_variant(f) if variant_filename not in usable_filenames: usable_filenames.add(f) return usable_filenames, variant_filenames @validate_hf_hub_args def warn_deprecated_model_variant(pretrained_model_name_or_path, token, variant, revision, model_filenames): info = model_info( pretrained_model_name_or_path, token=token, revision=None, ) filenames = {sibling.rfilename for sibling in info.siblings} comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision) comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames] if set(model_filenames).issubset(set(comp_model_filenames)): warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, ) else: warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.", FutureWarning, ) def _unwrap_model(model): """Unwraps a model.""" if is_compiled_module(model): model = model._orig_mod if is_peft_available(): from peft import PeftModel if isinstance(model, PeftModel): model = model.base_model.model return model def maybe_raise_or_warn( library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module ): """Simple helper method to raise or warn in case incorrect module has been passed""" if not is_pipeline_module: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} expected_class_obj = None for class_name, class_candidate in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): expected_class_obj = class_candidate # Dynamo wraps the original model in a private class. # I didn't find a public API to get the original class. sub_model = passed_class_obj[name] unwrapped_sub_model = _unwrap_model(sub_model) model_cls = unwrapped_sub_model.__class__ if not issubclass(model_cls, expected_class_obj): raise ValueError( f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}" ) else: logger.warning( f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" " has the correct type" ) def get_class_obj_and_candidates( library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=None, cache_dir=None ): """Simple helper method to retrieve class object of module as well as potential parent class objects""" component_folder = os.path.join(cache_dir, component_name) if is_pipeline_module: pipeline_module = getattr(pipelines, library_name) class_obj = getattr(pipeline_module, class_name) class_candidates = {c: class_obj for c in importable_classes.keys()} elif os.path.isfile(os.path.join(component_folder, library_name + ".py")): # load custom component class_obj = get_class_from_dynamic_module( component_folder, module_file=library_name + ".py", class_name=class_name ) class_candidates = {c: class_obj for c in importable_classes.keys()} else: # else we just import it from the library. 
library = importlib.import_module(library_name) class_obj = getattr(library, class_name) class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} return class_obj, class_candidates def _get_pipeline_class( class_obj, config=None, load_connected_pipeline=False, custom_pipeline=None, repo_id=None, hub_revision=None, class_name=None, cache_dir=None, revision=None, ): if custom_pipeline is not None: if custom_pipeline.endswith(".py"): path = Path(custom_pipeline) # decompose into folder & file file_name = path.name custom_pipeline = path.parent.absolute() elif repo_id is not None: file_name = f"{custom_pipeline}.py" custom_pipeline = repo_id else: file_name = CUSTOM_PIPELINE_FILE_NAME if repo_id is not None and hub_revision is not None: # if we load the pipeline code from the Hub # make sure to overwrite the `revision` revision = hub_revision return get_class_from_dynamic_module( custom_pipeline, module_file=file_name, class_name=class_name, cache_dir=cache_dir, revision=revision, ) if class_obj.__name__ != "DiffusionPipeline": return class_obj diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0]) class_name = class_name or config["_class_name"] if not class_name: raise ValueError( "The class name could not be found in the configuration file. Please make sure to pass the correct `class_name`." ) class_name = class_name[4:] if class_name.startswith("Flax") else class_name pipeline_cls = getattr(diffusers_module, class_name) if load_connected_pipeline: from .auto_pipeline import _get_connected_pipeline connected_pipeline_cls = _get_connected_pipeline(pipeline_cls) if connected_pipeline_cls is not None: logger.info( f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`" ) else: logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. 
Loading {pipeline_cls.__name__}.") pipeline_cls = connected_pipeline_cls or pipeline_cls return pipeline_cls def load_sub_model( library_name: str, class_name: str, importable_classes: List[Any], pipelines: Any, is_pipeline_module: bool, pipeline_class: Any, torch_dtype: torch.dtype, provider: Any, sess_options: Any, device_map: Optional[Union[Dict[str, torch.device], str]], max_memory: Optional[Dict[Union[int, str], Union[int, str]]], offload_folder: Optional[Union[str, os.PathLike]], offload_state_dict: bool, model_variants: Dict[str, str], name: str, from_flax: bool, variant: str, low_cpu_mem_usage: bool, cached_folder: Union[str, os.PathLike], ): """Helper method to load the module `name` from `library_name` and `class_name`""" # retrieve class candidates class_obj, class_candidates = get_class_obj_and_candidates( library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=name, cache_dir=cached_folder, ) load_method_name = None # retrieve load method name for class_name, class_candidate in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): load_method_name = importable_classes[class_name][1] # if load method name is None, then we have a dummy module -> raise Error if load_method_name is None: none_module = class_obj.__module__ is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith( TRANSFORMERS_DUMMY_MODULES_FOLDER ) if is_dummy_path and "dummy" in none_module: # call class_obj for nice error message of missing requirements class_obj() raise ValueError( f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have" f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}." ) load_method = getattr(class_obj, load_method_name) # add kwargs to loading method diffusers_module = importlib.import_module(__name__.split(".")[0]) loading_kwargs = {} if issubclass(class_obj, torch.nn.Module): loading_kwargs["torch_dtype"] = torch_dtype if issubclass(class_obj, diffusers_module.OnnxRuntimeModel): loading_kwargs["provider"] = provider loading_kwargs["sess_options"] = sess_options is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = "N/A" is_transformers_model = ( is_transformers_available() and issubclass(class_obj, PreTrainedModel) and transformers_version >= version.parse("4.20.0") ) # When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers. # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default. # This makes sure that the weights won't be initialized which significantly speeds up loading. 
if is_diffusers_model or is_transformers_model: loading_kwargs["device_map"] = device_map loading_kwargs["max_memory"] = max_memory loading_kwargs["offload_folder"] = offload_folder loading_kwargs["offload_state_dict"] = offload_state_dict loading_kwargs["variant"] = model_variants.pop(name, None) if from_flax: loading_kwargs["from_flax"] = True # the following can be deleted once the minimum required `transformers` version # is higher than 4.27 if ( is_transformers_model and loading_kwargs["variant"] is not None and transformers_version < version.parse("4.27.0") ): raise ImportError( f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0" ) elif is_transformers_model and loading_kwargs["variant"] is None: loading_kwargs.pop("variant") # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage` if not (from_flax and is_transformers_model): loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage else: loading_kwargs["low_cpu_mem_usage"] = False # check if the module is in a subdirectory if os.path.isdir(os.path.join(cached_folder, name)): loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs) else: # else load from the root directory loaded_sub_model = load_method(cached_folder, **loading_kwargs) return loaded_sub_model def _fetch_class_library_tuple(module): # import it here to avoid circular import diffusers_module = importlib.import_module(__name__.split(".")[0]) pipelines = getattr(diffusers_module, "pipelines") # register the config from the original module, not the dynamo compiled one not_compiled_module = _unwrap_model(module) library = not_compiled_module.__module__.split(".")[0] # check if the module is a pipeline module module_path_items = not_compiled_module.__module__.split(".") pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None path = not_compiled_module.__module__.split(".") is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) # if library is not in LOADABLE_CLASSES, then it is a custom module. # Or if it's a pipeline module, then the module is inside the pipeline # folder so we set the library to module name. if is_pipeline_module: library = pipeline_dir elif library not in LOADABLE_CLASSES: library = not_compiled_module.__module__ # retrieve class_name class_name = not_compiled_module.__class__.__name__ return (library, class_name)
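

# Minimal sketch of how the two filename helpers above behave; the repo file listing below
# is hypothetical and exists only to illustrate the matching rules described in their docstrings.
if __name__ == "__main__":
    example_filenames = [
        "unet/diffusion_pytorch_model.bin",
        "unet/diffusion_pytorch_model.safetensors",
        "unet/diffusion_pytorch_model.fp16.safetensors",
        "text_encoder/pytorch_model.bin",
        "text_encoder/model.safetensors",
    ]
    # Every ".bin" weight has a matching ".safetensors" counterpart, so this prints True.
    print(is_safetensors_compatible(example_filenames))
    # Variant files (here "fp16") are kept where they exist; other weights fall back to
    # their non-variant filenames.
    usable, variant_only = variant_compatible_siblings(example_filenames, variant="fp16")
    print(sorted(usable))
    print(sorted(variant_only))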
diffusers/src/diffusers/pipelines/pipeline_loading_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/pipeline_loading_utils.py", "repo_id": "diffusers", "token_count": 8199 }
130
# Stable Diffusion ## Overview Stable Diffusion was proposed in [Stable Diffusion Announcement](https://stability.ai/blog/stable-diffusion-announcement) by Patrick Esser and Robin Rombach and the Stability AI team. The summary of the model is the following: *Stable Diffusion is a text-to-image model that will empower billions of people to create stunning art within seconds. It is a breakthrough in speed and quality meaning that it can run on consumer GPUs. You can see some of the amazing output that has been created by this model without pre or post-processing on this page. The model itself builds upon the work of the team at CompVis and Runway in their widely used latent diffusion model combined with insights from the conditional diffusion models by our lead generative AI developer Katherine Crowson, Dall-E 2 by Open AI, Imagen by Google Brain and many others. We are delighted that AI media generation is a cooperative field and hope it can continue this way to bring the gift of creativity to all.* ## Tips: - Stable Diffusion has the same architecture as [Latent Diffusion](https://arxiv.org/abs/2112.10752) but uses a frozen CLIP Text Encoder instead of training the text encoder jointly with the diffusion model. - An in-detail explanation of the Stable Diffusion model can be found under [Stable Diffusion with 🧨 Diffusers](https://huggingface.co/blog/stable_diffusion). - If you don't want to rely on the Hugging Face Hub and having to pass a authentication token, you can download the weights with `git lfs install; git clone https://huggingface.co/runwayml/stable-diffusion-v1-5` and instead pass the local path to the cloned folder to `from_pretrained` as shown below. - Stable Diffusion can work with a variety of different samplers as is shown below. ## Available Pipelines: | Pipeline | Tasks | Colab |---|---|:---:| | [pipeline_stable_diffusion.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py) | *Text-to-Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | [pipeline_stable_diffusion_img2img](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) | *Image-to-Image Text-Guided Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb) | [pipeline_stable_diffusion_inpaint](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | *Text-Guided Image Inpainting* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb) ## Examples: ### Using Stable Diffusion without being logged into the Hub. If you want to download the model weights using a single Python line, you need to be logged in via `huggingface-cli login`. ```python from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") ``` This however can make it difficult to build applications on top of `diffusers` as you will always have to pass the token around. 
A potential way to solve this issue is by downloading the weights to a local path `"./stable-diffusion-v1-5"`: ``` git lfs install git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` and simply passing the local path to `from_pretrained`: ```python from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") ``` ### Text-to-Image with default PLMS scheduler ```python # make sure you're logged in with `huggingface-cli login` from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] image.save("astronaut_rides_horse.png") ``` ### Text-to-Image with DDIM scheduler ```python # make sure you're logged in with `huggingface-cli login` from diffusers import StableDiffusionPipeline, DDIMScheduler scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", scheduler=scheduler, ).to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] image.save("astronaut_rides_horse.png") ``` ### Text-to-Image with K-LMS scheduler ```python # make sure you're logged in with `huggingface-cli login` from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", scheduler=lms, ).to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] image.save("astronaut_rides_horse.png") ``` ### CycleDiffusion using Stable Diffusion and DDIM scheduler ```python import requests import torch from PIL import Image from io import BytesIO from diffusers import CycleDiffusionPipeline, DDIMScheduler # load the scheduler. CycleDiffusion only supports stochastic schedulers. 
# load the pipeline # make sure you're logged in with `huggingface-cli login` model_id_or_path = "CompVis/stable-diffusion-v1-4" scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") # let's download an initial image url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") init_image = init_image.resize((512, 512)) init_image.save("horse.png") # let's specify a prompt source_prompt = "An astronaut riding a horse" prompt = "An astronaut riding an elephant" # call the pipeline image = pipe( prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.8, guidance_scale=2, source_guidance_scale=1, ).images[0] image.save("horse_to_elephant.png") # let's try another example # See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") init_image = init_image.resize((512, 512)) init_image.save("black.png") source_prompt = "A black colored car" prompt = "A blue colored car" # call the pipeline torch.manual_seed(0) image = pipe( prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, ).images[0] image.save("black_to_blue.png") ```
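
### Swapping the scheduler on an existing pipeline

The examples above pass a scheduler to `from_pretrained`, but a scheduler can also be swapped after the pipeline is loaded. A minimal sketch (Euler is used here purely as an example; any of the schedulers shown above can be substituted the same way):

```python
# make sure you're logged in with `huggingface-cli login`
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
# reuse the existing scheduler config so the noise schedule stays consistent with the checkpoint
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
image.save("astronaut_rides_horse.png")
```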
diffusers/src/diffusers/pipelines/stable_diffusion/README.md/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/README.md", "repo_id": "diffusers", "token_count": 2465 }
131
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_k_diffusion_available, is_k_diffusion_version, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not ( is_transformers_available() and is_torch_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) else: _import_structure["pipeline_stable_diffusion_k_diffusion"] = ["StableDiffusionKDiffusionPipeline"] _import_structure["pipeline_stable_diffusion_xl_k_diffusion"] = ["StableDiffusionXLKDiffusionPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not ( is_transformers_available() and is_torch_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 812 }
132
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline _dummy_objects.update( {"UnCLIPImageVariationPipeline": UnCLIPImageVariationPipeline, "UnCLIPPipeline": UnCLIPPipeline} ) else: _import_structure["pipeline_unclip"] = ["UnCLIPPipeline"] _import_structure["pipeline_unclip_image_variation"] = ["UnCLIPImageVariationPipeline"] _import_structure["text_proj"] = ["UnCLIPTextProjModel"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/unclip/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unclip/__init__.py", "repo_id": "diffusers", "token_count": 700 }
133
# Copyright (c) 2022 Pablo Pernías MIT License # Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class DDPMWuerstchenSchedulerOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. """ prev_sample: torch.FloatTensor def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DDPMWuerstchenScheduler(SchedulerMixin, ConfigMixin): """ Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and Langevin dynamics sampling. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details, see the original paper: https://arxiv.org/abs/2006.11239 Args: scaler (`float`): .... s (`float`): .... 
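        In this implementation (see `_alpha_cumprod` below), `scaler` re-warps the normalized
        timestep (using `1 - (1 - t) ** scaler` for `scaler > 1` and `t ** scaler` for
        `scaler < 1`), and `s` is the small offset of the cosine noise schedule, playing the
        same role as in `betas_for_alpha_bar` above.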
""" @register_to_config def __init__( self, scaler: float = 1.0, s: float = 0.008, ): self.scaler = scaler self.s = torch.tensor([s]) self._init_alpha_cumprod = torch.cos(self.s / (1 + self.s) * torch.pi * 0.5) ** 2 # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 def _alpha_cumprod(self, t, device): if self.scaler > 1: t = 1 - (1 - t) ** self.scaler elif self.scaler < 1: t = t**self.scaler alpha_cumprod = torch.cos( (t + self.s.to(device)) / (1 + self.s.to(device)) * torch.pi * 0.5 ) ** 2 / self._init_alpha_cumprod.to(device) return alpha_cumprod.clamp(0.0001, 0.9999) def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep Returns: `torch.FloatTensor`: scaled input sample """ return sample def set_timesteps( self, num_inference_steps: int = None, timesteps: Optional[List[int]] = None, device: Union[str, torch.device] = None, ): """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: num_inference_steps (`Dict[float, int]`): the number of diffusion steps used when generating samples with a pre-trained model. If passed, then `timesteps` must be `None`. device (`str` or `torch.device`, optional): the device to which the timesteps are moved to. {2 / 3: 20, 0.0: 10} """ if timesteps is None: timesteps = torch.linspace(1.0, 0.0, num_inference_steps + 1, device=device) if not isinstance(timesteps, torch.Tensor): timesteps = torch.Tensor(timesteps).to(device) self.timesteps = timesteps def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True, ) -> Union[DDPMWuerstchenSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMWuerstchenSchedulerOutput class Returns: [`DDPMWuerstchenSchedulerOutput`] or `tuple`: [`DDPMWuerstchenSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" dtype = model_output.dtype device = model_output.device t = timestep prev_t = self.previous_timestep(t) alpha_cumprod = self._alpha_cumprod(t, device).view(t.size(0), *[1 for _ in sample.shape[1:]]) alpha_cumprod_prev = self._alpha_cumprod(prev_t, device).view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) alpha = alpha_cumprod / alpha_cumprod_prev mu = (1.0 / alpha).sqrt() * (sample - (1 - alpha) * model_output / (1 - alpha_cumprod).sqrt()) std_noise = randn_tensor(mu.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) std = ((1 - alpha) * (1.0 - alpha_cumprod_prev) / (1.0 - alpha_cumprod)).sqrt() * std_noise pred = mu + std * (prev_t != 0).float().view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) if not return_dict: return (pred.to(dtype),) return DDPMWuerstchenSchedulerOutput(prev_sample=pred.to(dtype)) def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor: device = original_samples.device dtype = original_samples.dtype alpha_cumprod = self._alpha_cumprod(timesteps, device=device).view( timesteps.size(0), *[1 for _ in original_samples.shape[1:]] ) noisy_samples = alpha_cumprod.sqrt() * original_samples + (1 - alpha_cumprod).sqrt() * noise return noisy_samples.to(dtype=dtype) def __len__(self): return self.config.num_train_timesteps def previous_timestep(self, timestep): index = (self.timesteps - timestep[0]).abs().argmin().item() prev_t = self.timesteps[index + 1][None].expand(timestep.shape[0]) return prev_t
diffusers/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py", "repo_id": "diffusers", "token_count": 3659 }
134
# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class KarrasVeSchedulerState: # setable values num_inference_steps: Optional[int] = None timesteps: Optional[jnp.ndarray] = None schedule: Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def create(cls): return cls() @dataclass class FlaxKarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. """ prev_sample: jnp.ndarray derivative: jnp.ndarray state: KarrasVeSchedulerState class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): """ Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and the VE column of Table 1 from [1] for reference. [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations." https://arxiv.org/abs/2011.13456 [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. Args: sigma_min (`float`): minimum noise magnitude sigma_max (`float`): maximum noise magnitude s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`): the parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`): the end value of the sigma range where we add noise. A reasonable range is [0.2, 80]. 
""" @property def has_state(self): return True @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): pass def create_state(self): return KarrasVeSchedulerState.create() def set_timesteps( self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> KarrasVeSchedulerState: """ Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, ) def add_noise_to_input( self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: jax.Array, ) -> Tuple[jnp.ndarray, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. TODO Args: """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) key = random.split(key, num=1) eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def step_correct( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Correct the predicted sample based on the output model_output of the network. TODO complete description Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO sample_prev (`torch.FloatTensor` or `np.ndarray`): TODO derivative (`torch.FloatTensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py", "repo_id": "diffusers", "token_count": 3954 }
135
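A note on entry 135: the scheduler above is stateless in the usual Flax style, so every call threads a `KarrasVeSchedulerState` through. The sketch below is not part of the file; `denoise_fn` is a hypothetical stand-in for a trained model, shapes and scaling are illustrative, and the real diffusers pipeline code differs in detail. It only shows roughly how the churn / Euler / correction calls are meant to be chained.

# Hedged sketch of driving the stateless FlaxKarrasVeScheduler shown in entry 135.
import jax
import jax.numpy as jnp
from diffusers import FlaxKarrasVeScheduler

def denoise_fn(x, sigma):  # hypothetical placeholder for a trained denoising model
    return jnp.zeros_like(x)

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
key, init_key = jax.random.split(key)
sample = jax.random.normal(init_key, (1, 3, 64, 64)) * scheduler.config.sigma_max

for t in state.timesteps:                       # runs from high noise down to low noise
    sigma = state.schedule[t]
    sigma_prev = state.schedule[t - 1] if t > 0 else 0.0

    # 1. "churn": temporarily raise the noise level from sigma to sigma_hat
    key, churn_key = jax.random.split(key)
    sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, churn_key)

    # 2. Euler step from sigma_hat down to sigma_prev
    model_output = denoise_fn(sample_hat, sigma_hat)
    out = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)

    # 3. optional second-order correction, as in the Karras et al. sampler
    if sigma_prev != 0:
        model_output = denoise_fn(out.prev_sample, sigma_prev)
        out = scheduler.step_correct(
            state, model_output, sigma_hat, sigma_prev,
            sample_hat, out.prev_sample, out.derivative,
        )
    sample = out.prev_sample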
import contextlib import copy import random from typing import Any, Dict, Iterable, List, Optional, Union import numpy as np import torch from .models import UNet2DConditionModel from .utils import ( convert_state_dict_to_diffusers, convert_state_dict_to_peft, deprecate, is_peft_available, is_torch_npu_available, is_torchvision_available, is_transformers_available, ) if is_transformers_available(): import transformers if is_peft_available(): from peft import set_peft_model_state_dict if is_torchvision_available(): from torchvision import transforms if is_torch_npu_available(): import torch_npu # noqa: F401 def set_seed(seed: int): """ Args: Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_torch_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available def compute_snr(noise_scheduler, timesteps): """ Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 """ alphas_cumprod = noise_scheduler.alphas_cumprod sqrt_alphas_cumprod = alphas_cumprod**0.5 sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 # Expand the tensors. # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026 sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] alpha = sqrt_alphas_cumprod.expand(timesteps.shape) sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) # Compute SNR. snr = (alpha / sigma) ** 2 return snr def resolve_interpolation_mode(interpolation_type: str): """ Maps a string describing an interpolation function to the corresponding torchvision `InterpolationMode` enum. The full list of supported enums is documented at https://pytorch.org/vision/0.9/transforms.html#torchvision.transforms.functional.InterpolationMode. Args: interpolation_type (`str`): A string describing an interpolation method. Currently, `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos` are supported, corresponding to the supported interpolation modes in torchvision. Returns: `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize` transform. """ if not is_torchvision_available(): raise ImportError( "Please make sure to install `torchvision` to be able to use the `resolve_interpolation_mode()` function." 
) if interpolation_type == "bilinear": interpolation_mode = transforms.InterpolationMode.BILINEAR elif interpolation_type == "bicubic": interpolation_mode = transforms.InterpolationMode.BICUBIC elif interpolation_type == "box": interpolation_mode = transforms.InterpolationMode.BOX elif interpolation_type == "nearest": interpolation_mode = transforms.InterpolationMode.NEAREST elif interpolation_type == "nearest_exact": interpolation_mode = transforms.InterpolationMode.NEAREST_EXACT elif interpolation_type == "hamming": interpolation_mode = transforms.InterpolationMode.HAMMING elif interpolation_type == "lanczos": interpolation_mode = transforms.InterpolationMode.LANCZOS else: raise ValueError( f"The given interpolation mode {interpolation_type} is not supported. Currently supported interpolation" f" modes are `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ) return interpolation_mode def unet_lora_state_dict(unet: UNet2DConditionModel) -> Dict[str, torch.Tensor]: r""" Returns: A state dict containing just the LoRA parameters. """ lora_state_dict = {} for name, module in unet.named_modules(): if hasattr(module, "set_lora_layer"): lora_layer = getattr(module, "lora_layer") if lora_layer is not None: current_lora_layer_sd = lora_layer.state_dict() for lora_layer_matrix_name, lora_param in current_lora_layer_sd.items(): # The matrix name can either be "down" or "up". lora_state_dict[f"{name}.lora.{lora_layer_matrix_name}"] = lora_param return lora_state_dict def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32): if not isinstance(model, list): model = [model] for m in model: for param in m.parameters(): # only upcast trainable parameters into fp32 if param.requires_grad: param.data = param.to(dtype) def _set_state_dict_into_text_encoder( lora_state_dict: Dict[str, torch.Tensor], prefix: str, text_encoder: torch.nn.Module ): """ Sets the `lora_state_dict` into `text_encoder` coming from `transformers`. Args: lora_state_dict: The state dictionary to be set. prefix: String identifier to retrieve the portion of the state dict that belongs to `text_encoder`. text_encoder: Where the `lora_state_dict` is to be set. """ text_encoder_state_dict = { f'{k.replace(prefix, "")}': v for k, v in lora_state_dict.items() if k.startswith(prefix) } text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict)) set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default") # Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 class EMAModel: """ Exponential Moving Average of models weights """ def __init__( self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs, ): """ Args: parameters (Iterable[torch.nn.Parameter]): The parameters to track. decay (float): The decay factor for the exponential moving average. min_decay (float): The minimum decay factor for the exponential moving average. update_after_step (int): The number of steps to wait before starting to update the EMA weights. use_ema_warmup (bool): Whether to use EMA warmup. inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1. Only used if `use_ema_warmup` is True. 
power (float): Exponential factor of EMA warmup. Default: 2/3. Only used if `use_ema_warmup` is True. device (Optional[Union[str, torch.device]]): The device to store the EMA weights on. If None, the EMA weights will be stored on CPU. @crowsonkb's notes on EMA Warmup: If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at 215.4k steps). """ if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility use_ema_warmup = True if kwargs.get("max_value", None) is not None: deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead." deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False) decay = kwargs["max_value"] if kwargs.get("min_value", None) is not None: deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead." deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False) min_decay = kwargs["min_value"] parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] if kwargs.get("device", None) is not None: deprecation_message = "The `device` argument is deprecated. Please use `to` instead." deprecate("device", "1.0.0", deprecation_message, standard_warn=False) self.to(device=kwargs["device"]) self.temp_stored_params = None self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_ema_warmup = use_ema_warmup self.inv_gamma = inv_gamma self.power = power self.optimization_step = 0 self.cur_decay_value = None # set in `step()` self.model_cls = model_cls self.model_config = model_config @classmethod def from_pretrained(cls, path, model_cls) -> "EMAModel": _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True) model = model_cls.from_pretrained(path) ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config) ema_model.load_state_dict(ema_kwargs) return ema_model def save_pretrained(self, path): if self.model_cls is None: raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.") if self.model_config is None: raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.") model = self.model_cls.from_config(self.model_config) state_dict = self.state_dict() state_dict.pop("shadow_params", None) model.register_to_config(**state_dict) self.copy_to(model.parameters()) model.save_pretrained(path) def get_decay(self, optimization_step: int) -> float: """ Compute the decay factor for the exponential moving average. 
""" step = max(0, optimization_step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_ema_warmup: cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power else: cur_decay_value = (1 + step) / (10 + step) cur_decay_value = min(cur_decay_value, self.decay) # make sure decay is not smaller than min_decay cur_decay_value = max(cur_decay_value, self.min_decay) return cur_decay_value @torch.no_grad() def step(self, parameters: Iterable[torch.nn.Parameter]): if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() parameters = list(parameters) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. decay = self.get_decay(self.optimization_step) self.cur_decay_value = decay one_minus_decay = 1 - decay context_manager = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled(): import deepspeed for s_param, param in zip(self.shadow_params, parameters): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param)) else: s_param.copy_(param) def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: """ Copy current averaged parameters into given collection of parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ parameters = list(parameters) for s_param, param in zip(self.shadow_params, parameters): param.data.copy_(s_param.to(param.device).data) def to(self, device=None, dtype=None) -> None: r"""Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` """ # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ] def state_dict(self) -> dict: r""" Returns the state of the ExponentialMovingAverage as a dict. This method is used by accelerate during checkpointing to save the ema state dict. """ # Following PyTorch conventions, references to tensors are returned: # "returns a reference to the state and not its copy!" - # https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Args: Save the current parameters for restoring later. parameters: Iterable of `torch.nn.Parameter`; the parameters to be temporarily stored. """ self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Args: Restore the parameters stored with the `store` method. 
Useful to validate the model with EMA parameters without: affecting the original optimization process. Store the parameters before the `copy_to()` method. After validation (or model saving), use this to restore the former parameters. parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored parameters. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ if self.temp_stored_params is None: raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`") for c_param, param in zip(self.temp_stored_params, parameters): param.data.copy_(c_param.data) # Better memory-wise. self.temp_stored_params = None def load_state_dict(self, state_dict: dict) -> None: r""" Args: Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to save the ema state dict. state_dict (dict): EMA state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = copy.deepcopy(state_dict) self.decay = state_dict.get("decay", self.decay) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("Decay must be between 0 and 1") self.min_decay = state_dict.get("min_decay", self.min_decay) if not isinstance(self.min_decay, float): raise ValueError("Invalid min_decay") self.optimization_step = state_dict.get("optimization_step", self.optimization_step) if not isinstance(self.optimization_step, int): raise ValueError("Invalid optimization_step") self.update_after_step = state_dict.get("update_after_step", self.update_after_step) if not isinstance(self.update_after_step, int): raise ValueError("Invalid update_after_step") self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup) if not isinstance(self.use_ema_warmup, bool): raise ValueError("Invalid use_ema_warmup") self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma) if not isinstance(self.inv_gamma, (float, int)): raise ValueError("Invalid inv_gamma") self.power = state_dict.get("power", self.power) if not isinstance(self.power, (float, int)): raise ValueError("Invalid power") shadow_params = state_dict.get("shadow_params", None) if shadow_params is not None: self.shadow_params = shadow_params if not isinstance(self.shadow_params, list): raise ValueError("shadow_params must be a list") if not all(isinstance(p, torch.Tensor) for p in self.shadow_params): raise ValueError("shadow_params must all be Tensors")
diffusers/src/diffusers/training_utils.py/0
{ "file_path": "diffusers/src/diffusers/training_utils.py", "repo_id": "diffusers", "token_count": 8024 }
136
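A note on entry 136: `compute_snr` is usually consumed by Min-SNR-gamma loss weighting. The sketch below follows the pattern used (with variations) in the diffusers training scripts; the scheduler, tensor shapes, and `model_pred`/`target` placeholders are assumptions for illustration, not part of the file.

# Hedged sketch: Min-SNR-gamma weighting built on compute_snr from entry 136.
import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler
from diffusers.training_utils import compute_snr

noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
batch = 4
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (batch,))
model_pred = torch.randn(batch, 3, 8, 8)   # placeholder network output (epsilon prediction)
target = torch.randn(batch, 3, 8, 8)       # placeholder noise target

snr_gamma = 5.0
snr = compute_snr(noise_scheduler, timesteps)
# weight_t = min(SNR(t), gamma) / SNR(t)
mse_loss_weights = torch.stack([snr, snr_gamma * torch.ones_like(snr)], dim=1).min(dim=1)[0] / snr

loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights  # per-sample loss, reweighted
loss = loss.mean()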
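The `EMAModel` in the same entry is driven from a training loop roughly as below. This is a hedged sketch with a toy linear model standing in for a diffusion UNet; `store()` / `copy_to()` / `restore()` bracket evaluation with EMA weights exactly as the docstrings above describe.

# Hedged sketch: typical EMAModel lifecycle (entry 136) with a toy model.
import torch
from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 4)                       # toy stand-in for the trained network
ema = EMAModel(model.parameters(), decay=0.9999, use_ema_warmup=True, power=2 / 3)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

for step in range(100):
    x = torch.randn(8, 4)
    loss = (model(x) - x).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ema.step(model.parameters())                    # update shadow weights after each optimizer step

# evaluate or save with EMA weights, then put the raw training weights back
ema.store(model.parameters())
ema.copy_to(model.parameters())
# ... run validation or save the model here ...
ema.restore(model.parameters())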
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class AltDiffusionImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AltDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AmusedImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AmusedInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AmusedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AnimateDiffPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AnimateDiffVideoToVideoPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AudioLDM2Pipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AudioLDM2ProjectionModel(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, 
**kwargs): requires_backends(cls, ["torch", "transformers"]) class AudioLDM2UNet2DConditionModel(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class AudioLDMPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class CLIPImageProjection(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class CycleDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class I2VGenXLPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class IFImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class IFImg2ImgSuperResolutionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class IFInpaintingPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class IFInpaintingSuperResolutionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) 
class IFPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class IFSuperResolutionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class ImageTextPipelineOutput(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class Kandinsky3Img2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class Kandinsky3Pipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyImg2ImgCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyInpaintCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyInpaintPipeline(metaclass=DummyObject): 
_backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyPriorPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22CombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22ControlnetImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22ControlnetPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22Img2ImgCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22Img2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22InpaintCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22InpaintPipeline(metaclass=DummyObject): _backends = 
["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22Pipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22PriorEmb2EmbPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class KandinskyV22PriorPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class LatentConsistencyModelPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class LDMTextToImagePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class LEditsPPPipelineStableDiffusion(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class LEditsPPPipelineStableDiffusionXL(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class MusicLDMPipeline(metaclass=DummyObject): _backends = ["torch", 
"transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class PaintByExamplePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class PIAPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class PixArtAlphaPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class SemanticStableDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class ShapEImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class ShapEPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableCascadeCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableCascadeDecoderPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableCascadePriorPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionAdapterPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionAttendAndExcitePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionControlNetImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionControlNetInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionControlNetPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionDepth2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionDiffEditPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionGLIGENPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionGLIGENTextImagePipeline(metaclass=DummyObject): _backends = ["torch", 
"transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionImageVariationPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionInstructPix2PixPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionLatentUpscalePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionLDM3DPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionModelEditingPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class 
StableDiffusionPanoramaPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionParadigmsPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionPipelineSafe(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionPix2PixZeroPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionSAGPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionUpscalePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLAdapterPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", 
"transformers"]) class StableDiffusionXLControlNetInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLControlNetPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLInpaintPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLInstructPix2PixPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableDiffusionXLPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableUnCLIPImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableUnCLIPPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class StableVideoDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): 
requires_backends(cls, ["torch", "transformers"]) class TextToVideoSDPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class TextToVideoZeroPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class TextToVideoZeroSDXLPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class UnCLIPImageVariationPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class UnCLIPPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class UniDiffuserModel(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class UniDiffuserPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class UniDiffuserTextDecoder(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class 
VersatileDiffusionImageVariationPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class VersatileDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class VideoToVideoSDPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class VQDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class WuerstchenCombinedPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class WuerstchenDecoderPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) class WuerstchenPriorPipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"])
diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py", "repo_id": "diffusers", "token_count": 19514 }
137
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import traceback import unittest import unittest.mock as mock import uuid from typing import Dict, List, Tuple import numpy as np import requests_mock import torch from huggingface_hub import ModelCard, delete_repo from huggingface_hub.utils import is_jinja_available from requests.exceptions import HTTPError from diffusers.models import UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor from diffusers.training_utils import EMAModel from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import ( CaptureLogger, require_python39_or_higher, require_torch_2, require_torch_accelerator_with_training, require_torch_gpu, run_test_in_subprocess, torch_device, ) from ..others.test_utils import TOKEN, USER, is_staging_test # Will be run via run_test_in_subprocess def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): error = None try: init_dict, model_class = in_queue.get(timeout=timeout) model = model_class(**init_dict) model.to(torch_device) model = torch.compile(model) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = model_class.from_pretrained(tmpdirname) new_model.to(torch_device) assert new_model.__class__ == model_class except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ModelUtilsTest(unittest.TestCase): def tearDown(self): super().tearDown() def test_accelerate_loading_error_message(self): with self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet") # make sure that error message states what keys are missing assert "conv_out.bias" in str(error_context.exception) def test_cached_files_are_used_when_no_internet(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. orig_model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.request", return_value=response_mock): # Download this model to make sure it's in the cache. model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True ) for p1, p2 in zip(orig_model.parameters(), model.parameters()): if p1.data.ne(p2.data).sum() > 0: assert False, "Parameters not the same!" 
def test_one_request_upon_cached(self): # TODO: For some reason this test fails on MPS where no HEAD call is made. if torch_device == "mps": return use_safetensors = False with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) download_requests = [r.method for r in m.request_history] assert download_requests.count("HEAD") == 2, "2 HEAD requests one for config, one for model" assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) cache_requests = [r.method for r in m.request_history] assert ( "HEAD" == cache_requests[0] and len(cache_requests) == 1 ), "We should call only `model_info` to check for _commit hash and `send_telemetry`" def test_weight_overwrite(self): with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, ) # make sure that error message states what keys are missing assert "Cannot load" in str(error_context.exception) with tempfile.TemporaryDirectory() as tmpdirname: model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True, ) assert model.config.in_channels == 9 class UNetTesterMixin: def test_forward_signature(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["sample", "timestep"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") class ModelTesterMixin: main_input_name = None # overwrite in model specific tester class base_precision = 1e-3 forward_requires_fresh_args = False def test_from_save_pretrained(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname) if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() new_model.to(torch_device) with 
torch.no_grad(): if self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def test_getattr_is_correct(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) # save some things to test model.dummy_attribute = 5 model.register_to_config(test_attribute=5) logger = logging.get_logger("diffusers.models.modeling_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(model, "dummy_attribute") assert getattr(model, "dummy_attribute") == 5 assert model.dummy_attribute == 5 # no warning should be thrown assert cap_logger.out == "" logger = logging.get_logger("diffusers.models.modeling_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(model, "save_pretrained") fn = model.save_pretrained fn_1 = getattr(model, "save_pretrained") assert fn == fn_1 # no warning should be thrown assert cap_logger.out == "" # warning should be thrown with self.assertWarns(FutureWarning): assert model.test_attribute == 5 with self.assertWarns(FutureWarning): assert getattr(model, "test_attribute") == 5 with self.assertRaises(AttributeError) as error: model.does_not_exist assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'" @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_set_xformers_attn_processor_for_determinism(self): torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return model.set_default_attn_processor() assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output = model(**self.inputs_dict(0))[0] else: output = model(**inputs_dict)[0] model.enable_xformers_memory_efficient_attention() assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(XFormersAttnProcessor()) assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_3 = model(**self.inputs_dict(0))[0] else: output_3 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) assert torch.allclose(output, output_2, atol=self.base_precision) assert torch.allclose(output, output_3, atol=self.base_precision) assert torch.allclose(output_2, output_3, atol=self.base_precision) @require_torch_gpu def test_set_attn_processor_for_determinism(self): torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = 
self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_1 = model(**self.inputs_dict(0))[0] else: output_1 = model(**inputs_dict)[0] model.set_default_attn_processor() assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessor2_0()) assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_4 = model(**self.inputs_dict(0))[0] else: output_4 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessor()) assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_5 = model(**self.inputs_dict(0))[0] else: output_5 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) # make sure that outputs match assert torch.allclose(output_2, output_1, atol=self.base_precision) assert torch.allclose(output_2, output_4, atol=self.base_precision) assert torch.allclose(output_2, output_5, atol=self.base_precision) def test_from_save_pretrained_variant(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() # non-variant cannot be loaded with self.assertRaises(OSError) as error_context: self.model_class.from_pretrained(tmpdirname) # make sure that error message states what keys are missing assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) new_model.to(torch_device) with torch.no_grad(): if self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") @require_python39_or_higher @require_torch_2 def test_from_save_pretrained_dynamo(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() inputs = [init_dict, self.model_class] run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs) def test_from_save_pretrained_dtype(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) 
model.to(torch_device) model.eval() for dtype in [torch.float32, torch.float16, torch.bfloat16]: if torch_device == "mps" and dtype == torch.bfloat16: continue with tempfile.TemporaryDirectory() as tmpdirname: model.to(dtype) model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=True, torch_dtype=dtype) assert new_model.dtype == dtype new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype) assert new_model.dtype == dtype def test_determinism(self, expected_max_diff=1e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: first = model(**self.inputs_dict(0)) else: first = model(**inputs_dict) if isinstance(first, dict): first = first.to_tuple()[0] if self.forward_requires_fresh_args: second = model(**self.inputs_dict(0)) else: second = model(**inputs_dict) if isinstance(second, dict): second = second.to_tuple()[0] out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, expected_max_diff) def test_output(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) # input & output have to have the same shape input_tensor = inputs_dict[self.main_input_name] expected_shape = input_tensor.shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_from_pretrained(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() # test if the model can be loaded from the config # and has all the expected shape with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname) new_model.to(torch_device) new_model.eval() # check if all parameters shape are the same for param_name in model.state_dict().keys(): param_1 = model.state_dict()[param_name] param_2 = new_model.state_dict()[param_name] self.assertEqual(param_1.shape, param_2.shape) with torch.no_grad(): output_1 = model(**inputs_dict) if isinstance(output_1, dict): output_1 = output_1.to_tuple()[0] output_2 = new_model(**inputs_dict) if isinstance(output_2, dict): output_2 = output_2.to_tuple()[0] self.assertEqual(output_1.shape, output_2.shape) @require_torch_accelerator_with_training def test_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.train() output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] input_tensor = inputs_dict[self.main_input_name] noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward() @require_torch_accelerator_with_training def test_ema_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = 
self.model_class(**init_dict) model.to(torch_device) model.train() ema_model = EMAModel(model.parameters()) output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] input_tensor = inputs_dict[self.main_input_name] noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward() ema_model.step(model.parameters()) def test_outputs_equivalence(self): def set_nan_tensor_to_zero(t): # Temporary fallback until `aten::_index_put_impl_` is implemented in mps # Track progress in https://github.com/pytorch/pytorch/issues/77764 device = t.device if device.type == "mps": t = t.to("cpu") t[t != t] = 0 return t.to(device) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: outputs_dict = model(**self.inputs_dict(0)) outputs_tuple = model(**self.inputs_dict(0), return_dict=False) else: outputs_dict = model(**inputs_dict) outputs_tuple = model(**inputs_dict, return_dict=False) recursive_check(outputs_tuple, outputs_dict) @require_torch_accelerator_with_training def test_enable_disable_gradient_checkpointing(self): if not self.model_class._supports_gradient_checkpointing: return # Skip test if model does not support gradient checkpointing init_dict, _ = self.prepare_init_args_and_inputs_for_common() # at init model should have gradient checkpointing disabled model = self.model_class(**init_dict) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.enable_gradient_checkpointing() self.assertTrue(model.is_gradient_checkpointing) # check disable works model.disable_gradient_checkpointing() self.assertFalse(model.is_gradient_checkpointing) def test_deprecated_kwargs(self): has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" " under the `_deprecated_kwargs` class attribute. 
Make sure to either remove `**kwargs` if there are" " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to" f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" " from `_deprecated_kwargs = [<deprecated_argument>]`" ) @is_staging_test class ModelPushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-model-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def test_push_to_hub(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN) @unittest.skipIf( not is_jinja_available(), reason="Model card tests cannot be performed without Jinja installed.", ) def test_push_to_hub_library_name(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data assert model_card.library_name == "diffusers" # Reset repo delete_repo(self.repo_id, token=TOKEN)
diffusers/tests/models/test_modeling_common.py/0
{ "file_path": "diffusers/tests/models/test_modeling_common.py", "repo_id": "diffusers", "token_count": 13953 }
138
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
diffusers/tests/others/test_check_dummies.py/0
{ "file_path": "diffusers/tests/others/test_check_dummies.py", "repo_id": "diffusers", "token_count": 1872 }
139
import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffVideoToVideoPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, UNet2DConditionModel, UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffVideoToVideoPipelineFastTests(IPAdapterTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = AnimateDiffVideoToVideoPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = VIDEO_TO_VIDEO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=(32, 64), motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) video_height = 32 video_width = 32 video_num_frames = 2 video = [Image.new("RGB", (video_width, video_height))] * video_num_frames inputs = { "video": video, "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffVideoToVideoPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in 
pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device) pipe(**inputs) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") def test_free_init(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] pipe.enable_free_init( num_iters=2, use_fast_sampling=True, method="butterworth", order=4, spatial_stop_frequency=0.25, temporal_stop_frequency=0.25, ) inputs_enable_free_init = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] pipe.disable_free_init() inputs_disable_free_init = self.get_dummy_inputs(torch_device) frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeInit should lead to results similar to the default pipeline results", )
diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py/0
{ "file_path": "diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py", "repo_id": "diffusers", "token_count": 5211 }
140
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLImg2ImgFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, skip_first_text_encoder=False): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, 
vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): controlnet_embedder_scale_factor = 2 image = floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": image, } return inputs def test_stable_diffusion_xl_controlnet_img2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_controlnet_img2img_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] assert output.images.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) 
sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py", "repo_id": "diffusers", "token_count": 6012 }
141
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@nightly
@require_torch_gpu
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
diffusers/tests/pipelines/dit/test_dit.py/0
{ "file_path": "diffusers/tests/pipelines/dit/test_dit.py", "repo_id": "diffusers", "token_count": 2337 }
142
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, MusicLDMPipeline, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = MusicLDMPipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=32 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], 
resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": feature_extractor, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_musicldm_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = musicldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0020, -0.0035, -0.0019, -0.0037, -0.0020, -0.0038, -0.0019] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_prompt_embeds(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = musicldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = musicldm_pipe.tokenizer( prompt, padding="max_length", max_length=musicldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = musicldm_pipe.text_encoder.get_text_features(text_inputs) inputs["prompt_embeds"] = prompt_embeds # forward output = musicldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_musicldm_negative_prompt_embeds(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = musicldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = musicldm_pipe.tokenizer( p, padding="max_length", max_length=musicldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) text_embeds = musicldm_pipe.text_encoder.get_text_features( text_inputs, ) embeds.append(text_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = musicldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_musicldm_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator 
components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = musicldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0019, -0.0035, -0.0018, -0.0037, -0.0021, -0.0038, -0.0018] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = musicldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = musicldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = musicldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = musicldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_musicldm_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = musicldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = musicldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = musicldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_musicldm_vocoder_model_in_dim(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = musicldm_pipe.vocoder.config config.model_in_dim *= 2 musicldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): 
self._test_inference_batch_single_identical() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) @nightly @require_torch_gpu class MusicLDMPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_musicldm(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8680:8690] expected_slice = np.array( [-0.1042, -0.1068, -0.1235, -0.1387, -0.1428, -0.136, -0.1213, -0.1097, -0.0967, -0.0945] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_musicldm_lms(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe.scheduler = LMSDiscreteScheduler.from_config(musicldm_pipe.scheduler.config) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[58020:58030] expected_slice = np.array([0.3592, 0.3477, 0.4084, 0.4665, 0.5048, 0.5891, 0.6461, 0.5579, 0.4595, 0.4403]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3
diffusers/tests/pipelines/musicldm/test_musicldm.py/0
{ "file_path": "diffusers/tests/pipelines/musicldm/test_musicldm.py", "repo_id": "diffusers", "token_count": 7995 }
143
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, StableCascadeCombinedPipeline from diffusers.models import StableCascadeUNet from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableCascadeCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableCascadeCombinedPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "prior_guidance_scale", "decoder_guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "prior_num_inference_steps", "output_type", ] test_xformers_attention = True @property def text_embedder_hidden_size(self): return 32 @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "conditioning_dim": 128, "block_out_channels": (128, 128), "num_attention_heads": (2, 2), "down_num_layers_per_block": (1, 1), "up_num_layers_per_block": (1, 1), "clip_image_in_channels": 768, "switch_level": (False,), "clip_text_in_channels": self.text_embedder_hidden_size, "clip_text_pooled_in_channels": self.text_embedder_hidden_size, } model = StableCascadeUNet(**model_kwargs) return model.eval() @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, "out_channels": 4, "conditioning_dim": 128, "block_out_channels": (16, 32, 64, 128), "num_attention_heads": (-1, -1, 1, 2), "down_num_layers_per_block": (1, 1, 1, 1), "up_num_layers_per_block": (1, 1, 1, 1), "down_blocks_repeat_mappers": (1, 1, 1, 1), "up_blocks_repeat_mappers": (3, 3, 2, 2), "block_types_per_layer": ( ("SDCascadeResBlock", "SDCascadeTimestepBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ), "switch_level": None, "clip_text_pooled_in_channels": 32, "dropout": (0.1, 0.1, 0.1, 0.1), } model = 
StableCascadeUNet(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior scheduler = DDPMWuerstchenScheduler() tokenizer = self.dummy_tokenizer text_encoder = self.dummy_text_encoder decoder = self.dummy_decoder vqgan = self.dummy_vqgan prior_text_encoder = self.dummy_text_encoder prior_tokenizer = self.dummy_tokenizer components = { "text_encoder": text_encoder, "tokenizer": tokenizer, "decoder": decoder, "scheduler": scheduler, "vqgan": vqgan, "prior_text_encoder": prior_text_encoder, "prior_tokenizer": prior_tokenizer, "prior_prior": prior, "prior_scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_guidance_scale": 4.0, "decoder_guidance_scale": 4.0, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", "height": 128, "width": 128, } return inputs def test_stable_cascade(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=2e-2) @unittest.skip(reason="fp16 not supported") def test_float16_inference(self): super().test_float16_inference() @unittest.skip(reason="no callback test for combined pipeline") def test_callback_inputs(self): super().test_callback_inputs() def test_stable_cascade_combined_prompt_embeds(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeCombinedPipeline(**components) pipe.set_progress_bar_config(disable=None) prompt = "A photograph of a shiba inu, wearing a hat" ( prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled, ) = pipe.prior_pipe.encode_prompt(device, 1, 1, False, prompt=prompt) generator = torch.Generator(device=device) output_prompt = pipe( prompt=prompt, num_inference_steps=1, 
prior_num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) output_prompt_embeds = pipe( prompt=None, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, num_inference_steps=1, prior_num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) assert np.abs(output_prompt.images - output_prompt_embeds.images).max() < 1e-5
diffusers/tests/pipelines/stable_cascade/test_stable_cascade_combined.py/0
{ "file_path": "diffusers/tests/pipelines/stable_cascade/test_stable_cascade_combined.py", "repo_id": "diffusers", "token_count": 4525 }
144
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, numpy_cosine_similarity_distance, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionDiffEditPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) inverse_scheduler = DDIMInverseScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): mask = floats_tensor((1, 16, 
16), rng=random.Random(seed)).to(device) latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "np", } return inputs def get_dummy_mask_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "np", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "np", } return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_mask(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_mask_inputs(device) mask = pipe.generate_mask(**inputs) mask_slice = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16)) expected_slice = np.array([0] * 9) max_diff = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) self.assertEqual(mask[0, -3, -4], 0) def test_inversion(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = 
self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5160, 0.5115, 0.5060, 0.5456, 0.4704, 0.5060, 0.5019, 0.4405, 0.4726], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_inversion_dpm(self): device = "cpu" components = self.get_dummy_components() scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"} components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args) components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args) pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5305, 0.4673, 0.5314, 0.5308, 0.4886, 0.5279, 0.5142, 0.4724, 0.4892], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @require_torch_gpu @nightly class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((256, 256)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_full(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.scheduler.clip_sample = True pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=5, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=5, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((256, 256)) ) / 255 ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 2e-1 @nightly @require_torch_gpu class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((768, 768)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_dpm(self): generator = torch.manual_seed(0) pipe = 
StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768)) ) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py", "repo_id": "diffusers", "token_count": 7240 }
145
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device enable_full_determinism() @nightly @require_torch_gpu class StableDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_1(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_euler") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_2(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_euler") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1 def test_stable_diffusion_karras_sigmas(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_dpmpp_2m") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True, ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_noise_sampler_seed(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_dpmpp_sde") prompt = "A painting of a 
squirrel eating a burger" seed = 0 images1 = sd_pipe( [prompt], generator=torch.manual_seed(seed), noise_sampler_seed=seed, guidance_scale=9.0, num_inference_steps=20, output_type="np", ).images images2 = sd_pipe( [prompt], generator=torch.manual_seed(seed), noise_sampler_seed=seed, guidance_scale=9.0, num_inference_steps=20, output_type="np", ).images assert images1.shape == (1, 512, 512, 3) assert images2.shape == (1, 512, 512, 3) assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_k_diffusion/test_stable_diffusion_k_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_k_diffusion/test_stable_diffusion_k_diffusion.py", "repo_id": "diffusers", "token_count": 2113 }
146
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNet3DConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, is_flaky, nightly, numpy_cosine_similarity_distance, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = VideoToVideoSDPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} test_attention_slicing = False # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=True, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[ 8, ], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): # 3 frames video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def 
test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = VideoToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][0][-3:, -3:, -1] assert frames[0][0].shape == (32, 32, 3) expected_slice = np.array([0.6391, 0.5350, 0.5202, 0.5521, 0.5453, 0.5393, 0.6652, 0.5270, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @is_flaky() def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=0.001) @is_flaky() def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent() @is_flaky() def test_save_load_local(self): super().test_save_load_local() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3) # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_single_identical(self): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_num_images_per_prompt(self): pass def test_progress_bar(self): return super().test_progress_bar() @nightly @skip_mps class VideoToVideoSDPipelineSlowTests(unittest.TestCase): def test_two_step_model(self): pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() # 10 frames generator = torch.Generator(device="cpu").manual_seed(0) video = torch.randn((1, 10, 3, 320, 576), generator=generator) prompt = "Spiderman is surfing" generator = torch.Generator(device="cpu").manual_seed(0) video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="np").frames expected_array = np.array( [0.17114258, 0.13720703, 0.08886719, 0.14819336, 0.1730957, 0.24584961, 0.22021484, 0.35180664, 0.2607422] ) output_array = video_frames[0, 0, :3, :3, 0].flatten() assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-3
diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py/0
{ "file_path": "diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py", "repo_id": "diffusers", "token_count": 3465 }
147
# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class DDPMParallelSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDPMParallelScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_batch_step_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample1 = self.dummy_sample_deter sample2 = self.dummy_sample_deter + 0.1 sample3 = self.dummy_sample_deter - 0.1 per_sample_batch = sample1.shape[0] samples = torch.stack([sample1, sample2, sample3], dim=0) timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch) residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1)) pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1)) result_sum = torch.sum(torch.abs(pred_prev_sample)) result_mean = 
torch.mean(torch.abs(pred_prev_sample)) assert abs(result_sum.item() - 1153.1833) < 1e-2 assert abs(result_mean.item() - 0.5005) < 1e-3 def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ): scheduler.set_timesteps(timesteps=timesteps) def test_full_loop_with_noise(self): scheduler_class = 
self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) t_start = num_trained_timesteps - 2 model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 387.9466) < 1e-2, f" expected result sum 387.9466, but get {result_sum}" assert abs(result_mean.item() - 0.5051) < 1e-3, f" expected result mean 0.5051, but get {result_mean}"
diffusers/tests/schedulers/test_scheduler_ddpm_parallel.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ddpm_parallel.py", "repo_id": "diffusers", "token_count": 4271 }
148
import torch from diffusers import LMSDiscreteScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class LMSDiscreteSchedulerTest(SchedulerCommonTest): scheduler_classes = (LMSDiscreteScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [0, 500, 800]: self.check_over_forward(time_step=t) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 1006.388) < 1e-2 assert abs(result_mean.item() - 1.31) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 0.0017) < 1e-2 assert abs(result_mean.item() - 2.2676e-06) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 1006.388) < 1e-2 assert abs(result_mean.item() - 1.31) < 1e-3 def test_full_loop_device_karras_sigmas(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) scheduler.set_timesteps(self.num_inference_steps, 
device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 3812.9927) < 2e-2 assert abs(result_mean.item() - 4.9648) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma # add noise t_start = self.num_inference_steps - 2 noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 27663.6895) < 1e-2 assert abs(result_mean.item() - 36.0204) < 1e-3
diffusers/tests/schedulers/test_scheduler_lms.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_lms.py", "repo_id": "diffusers", "token_count": 2696 }
149
# JAX/Diffusers community sprint

Welcome to the JAX/Diffusers community sprint! The goal of this sprint is to work on fun and creative diffusion models using JAX and Diffusers. In this event, we will create various applications with diffusion models in JAX/Flax and Diffusers using free TPU hours generously provided by Google Cloud.

This document is a walkthrough of all the important information you need to make a submission to the JAX/Diffusers community sprint. Don't forget to fill out the [signup form]!

> 💡 Note: This document is still WIP and it only contains initial details of the event. We will keep updating this document as we make other relevant information available throughout the community sprint.

## Organization

Participants can propose ideas for an interesting project involving diffusion models. Teams of 3 to 5 members will then be formed around the most promising and interesting projects. Make sure to read through the [Communication](#communication) section on how to propose projects, comment on other participants' project ideas, and create a team.

To help each team successfully finish their project, we will organize talks by leading scientists and engineers from Google, Hugging Face, and the open-source diffusion community. The talks will take place on April 13th, 14th, and 17th. Make sure to attend the talks to get the most out of your participation! Check out the [Talks](#talks) section for an overview of the talks, including the speakers and the time of each talk.

Each team is then given **free access to a TPU v4-8 VM** from April 14 to May 1st. In addition, we will provide a training example in JAX/Flax and Diffusers to train [ControlNets](https://huggingface.co/blog/controlnet) to kick-start your project. We will also provide examples of how to prepare datasets for ControlNet training. During the sprint, we'll make sure to answer any questions you might have about JAX/Flax and Diffusers and help each team as much as possible to complete their projects!

> 💡 Note: We will not be distributing TPUs to single-member teams, so you are encouraged to either join a team or find teammates for your idea.

At the end of the community sprint, each submission will be evaluated by a jury and the top-3 demos will be awarded a prize. Check out the [How to submit a demo] (TODO) section for more information and suggestions on how to submit your project.

> 💡 Note: Even though we provide an example for performing ControlNet training, participants can propose ideas that do not involve ControlNets at all. But the ideas need to be centered around diffusion models.

## Important dates

- **29.03.** Official announcement of the community sprint. Make sure to fill out the [signup form].
- **31.03.** Start forming groups in the #jax-diffusers-ideas channel on Discord.
- **10.04.** Data collection.
- **13.04. - 14.04. - [17.04.](https://www.youtube.com/watch?v=SOj2sxgvFe0)** Kick-off event with talks on Youtube.
- **14.04. - 17.04.** Start providing access to TPUs.
- **01.05.** Shutdown access to TPUs.
- **08.05.** Announcement of the top 10 projects and prizes.

> 💡 Note: We will be accepting applications throughout the sprint.

## Communication

All important communication will take place on our Discord server. Join the server using [this link](https://hf.co/join/discord). After you join the server, take the Diffusers role in the `#role-assignment` channel and head to the `#jax-diffusers-ideas` channel to share your idea as a forum post.
To sign up for participation, fill out the [signup form] and we will give you access to two more Discord channels for discussions and technical support, and access to TPUs. Important announcements of the Hugging Face, Flax/JAX, and Google Cloud teams will be posted on the server. The Discord server will be the central place for participants to post about their results, share their learning experiences, ask questions and get technical support with the various obstacles they encounter. For issues with Flax/JAX, Diffusers, Datasets or for questions that are specific to your project, we will be interacting through public repositories and forums: - Flax: [Issues](https://github.com/google/flax/issues), [Questions](https://github.com/google/flax/discussions) - JAX: [Issues](https://github.com/google/jax/issues), [Questions](https://github.com/google/jax/discussions) - 🤗 Diffusers: [Issues](https://github.com/huggingface/diffusers/issues), [Questions](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) - 🤗 Datasets: [Issues](https://github.com/huggingface/datasets/issues), [Questions](https://discuss.huggingface.co/c/datasets/10) - Project-specific questions: can be asked in each project's own post in the `#jax-diffusers-ideas` channel on Discord. - TPU-related questions: `#jax-diffusers-tpu-support` channel on Discord. - General discussion: `#jax-diffusers-sprint` channel on Discord. You will get access to `#jax-diffusers-tpu-support` and `#jax-diffusers-sprint` once you are accepted to attend the sprint. When asking for help, we encourage you to post the link to your [forum](https://discuss.huggingface.co) post on the Discord server, instead of directly posting issues or questions. This way, we make sure that everybody in the community can benefit from your questions, even after the community sprint. > 💡 Note: After the 10th of April, if you have signed up on the Google form, but you are not in the Discord channel, please leave a message on [the official forum announcement](https://discuss.huggingface.co/t/controlling-stable-diffusion-with-jax-and-diffusers-using-v4-tpus/35187/2) and ping `@mervenoyan`, `@sayakpaul`, and `@patrickvonplaten`. We might take a day to process these requests. ## Talks We have invited prominent researchers and engineers from Google, Hugging Face, and the open-source community who are working in the Generative AI space. We will update this section with links to the talks, so keep an eye here or on Discord in the diffusion models core-announcements channel and set your reminders! 
### **April 13, 2023** | Speaker | Topic | Time | Video | |---|---|---|---| [Emiel Hoogeboom, Google Brain](https://twitter.com/emiel_hoogeboom?lang=en) | Pixel-Space Diffusion models for High Resolution Images | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | | [Apolinário Passos, Hugging Face](https://twitter.com/multimodalart?lang=en) | Introduction to Diffusers library | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | [Ting Chen, Google Brain](https://twitter.com/tingchenai?lang=en) | Diffusion++: discrete data and high-dimensional generation | 5.45pm-6.25pm CEST / 08.45am-09.25am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=iw2WCAGxdQ4) | ### **April 14, 2023** | Speaker | Topic | Time | Video | |---|---|---|---| | [Tim Salimans, Google Brain](https://twitter.com/timsalimans?lang=en) | Efficient image and video generation with distilled diffusion models | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | | [Suraj Patil, Hugging Face](https://twitter.com/psuraj28?lang=en) | Masked Generative Models: MaskGIT/Muse | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | | [Sabrina Mielke, John Hopkins University](https://twitter.com/sjmielke?lang=en) | From stateful code to purified JAX: how to build your neural net framework | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=6f5chgbKjSg&ab_channel=HuggingFace) | ### **April 17, 2023** | Speaker | Topic | Time | Video | |---|---|---|---| | [Andreas Steiner, Google Brain](https://twitter.com/AndreasPSteiner) | JAX & ControlNet | 4.00pm-4.40pm CEST / 7.00am-7.40am PST| [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | | [Boris Dayma, craiyon](https://twitter.com/borisdayma?lang=en) | DALL-E Mini | 4.40pm-5.20pm CEST / 7.40am-08.20am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | | [Margaret Mitchell, Hugging Face](https://twitter.com/mmitchell_ai?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor) | Ethics of Text-to-Image | 5.20pm-6.00pm CEST / 08.20am-09.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=SOj2sxgvFe0) | [signup form]: https://forms.gle/t3M7aNPuLL9V1sfa9 ## Data and Pre-Processing In this section, we will cover how to build your own dataset for ControlNet training. ### Prepare a large local dataset #### Mount a disk If you need extra space, you can follow [this guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#prerequisites) to create a persistent disk, attach it to your TPU VM, and create a directory to mount the disk. You can then use this directory to store your dataset. As a side note, the TPU VM allocated to your team has a 3 TB persistent storage drive attached to it. 
To learn how to use it, check out [this guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#mount-pd). #### Data preprocessing Here we demonstrate how to prepare a large dataset to train a ControlNet model with canny edge detection. More specifically, we provide an [example script](./dataset_tools/coyo_1m_dataset_preprocess.py) that: * Selects 1 million image-text pairs from an existing dataset [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m). * Downloads each image and use Canny edge detector to generate the conditioning image. * Create a metafile that links all the images and processed images to their text captions. Use the following command to run the example data preprocessing script. If you've mounted a disk to your TPU, you should place your `train_data_dir` and `cache_dir` on the mounted disk ```bash python3 coyo_1m_dataset_preprocess.py \ --train_data_dir="/mnt/disks/persist/data" \ --cache_dir="/mnt/disks/persist" \ --max_train_samples=1000000 \ --num_proc=16 ``` Once the script finishes running, you can find a data folder at the specified `train_data_dir` with the below folder structure: ``` data ├── images │ ├── image_1.png │ ├── ....... │ └── image_1000000.jpeg ├── processed_images │ ├── image_1.png │ ├── ....... │ └── image_1000000.jpeg └── meta.jsonl ``` #### Load dataset To load a dataset from the data folder you just created, you should add a dataset loading script to your data folder. The dataset loading script should have the same name as the folder. For example, if your data folder is `data`, you should add a data loading script named `data.py`. We provide an [example data loading script](./dataset_tools/data.py) for you to use. All you need to do is to update the `DATA_DIR` with the correct path to your data folder. For more details about how to write a dataset loading script, refer to the [documentation](https://huggingface.co/docs/datasets/dataset_script). Once the dataset loading script is added to your data folder, you can load it with: ```python dataset = load_dataset("/mnt/disks/persist/data", cache_dir="/mnt/disks/persist" ) ``` Note that you can use the `--train_data_dir` flag to pass your data folder directory to the training script and generate your dataset automatically during the training. For large datasets, we recommend generating the dataset once and saving it on the disk with ```python dataset.save_to_disk("/mnt/disks/persist/dataset") ``` You can then reuse the saved dataset for your training by passing the `--load_from_disk` flag. 
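As a quick sanity check, you can also reload the saved dataset directly in Python before launching a training run (a minimal sketch; it assumes the dataset was saved to the path used above):

```python
from datasets import load_from_disk

# Reload the dataset that was previously written with `dataset.save_to_disk(...)`
dataset = load_from_disk("/mnt/disks/persist/dataset")
print(dataset)  # prints the column names and number of rows that were saved
```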
Here is an example to run a training script that will load the dataset from the disk ```python export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="/mnt/disks/persist/canny_model" export DATASET_DIR="/mnt/disks/persist/dataset" export DISK_DIR="/mnt/disks/persist" python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --train_data_dir=$DATASET_DIR \ --load_from_disk \ --cache_dir=$DISK_DIR \ --resolution=512 \ --learning_rate=1e-5 \ --train_batch_size=2 \ --revision="non-ema" \ --from_pt \ --max_train_steps=500000 \ --checkpointing_steps=10000 \ --dataloader_num_workers=16 ``` ### Prepare a dataset with MediaPipe and Hugging Face We provide a notebook ([![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/community-events/blob/main/jax-controlnet-sprint/dataset_tools/create_pose_dataset.ipynb)) that shows you how to prepare a dataset for ControlNet training using [MediaPipe](https://developers.google.com/mediapipe) and Hugging Face. Specifically, in the notebook, we show: * How to leverage MediaPipe solutions to extract pose body joints from the input images. * Predict captions using BLIP-2 from the input images using 🤗 Transformers. * Build and push the final dataset to the Hugging Face Hub using 🤗 Datasets. You can refer to the notebook to create your own datasets using other MediaPipe solutions as well. Below, we list all the relevant ones: * [Pose Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker) * [Face Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/face_landmarker) * [Selfie Segmentation](https://developers.google.com/mediapipe/solutions/vision/image_segmenter) ## Training ControlNet This is perhaps the most fun and interesting part of this document as here we show you how to train a custom ControlNet model. > 💡 Note: For this sprint, you are NOT restricted to just training ControlNets. We provide this training script as a reference for you to get started. For faster training on TPUs and GPUs you can leverage the Flax training example. Follow the instructions above to get the model and dataset before running the script. ### Setting up your TPU VM _Before proceeding with the rest of this section, you must ensure that the email address you're using has been added to the `hf-flax` project on Google Cloud Platform. If it's not the case, please let us know in the Discord server (you can tag `@sayakpaul`, `@merve`, and `@patrickvonplaten`)._ In the following, we will describe how to do so using a standard console, but you should also be able to connect to the TPU VM via IDEs, like Visual Studio Code, etc. 1. You need to install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/install). Please follow the instructions on https://cloud.google.com/sdk. 2. Once you've installed the Google Cloud SDK, you should set your account by running the following command. Make sure that <your-email-address> corresponds to the gmail address you used to sign up for this event. ```bash gcloud config set account <your-email-adress> ``` 3. Let's also make sure the correct project is set in case your email is used for multiple gcloud projects: ```bash gcloud config set project hf-flax ``` 4. Next, you will need to authenticate yourself. You can do so by running: ```bash gcloud auth login ``` This should give you a link to a website, where you can authenticate your gmail account. 5. 
Finally, you can establish an SSH tunnel into the TPU VM! Please run the following command by setting`--zone` to `us-central2-b` and to the TPU name also sent to you via email from the Hugging Face team. ```bash gcloud alpha compute tpus tpu-vm ssh <tpu-name> --zone <zone> --project hf-flax ``` This should establish an SSH tunnel into the TPU VM! > 💡 Note: You are NOT supposed to have access to the Google Cloud console. Also, you might not get an invitation link to join the `hf-flax` project. But you should still be able to access the TPU VM following the above steps. > 💡 Note: The TPU VMs are already attached to persistent storage drives (of 3 TB). This will be helpful in case your team wants to perform training on a large dataset locally. The disk name of the storage drive should also be present in the email you received. Follow [this section](https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint#mount-a-disk) for more details. ### Installing JAX Let's first create a Python virtual environment: ```bash python3 -m venv <your-venv-name> ``` We can activate the environment by running: ```bash source ~/<your-venv-name>/bin/activate ``` Then install Diffusers and the library's training dependencies: ```bash pip install git+https://github.com/huggingface/diffusers.git ``` Then clone this repository and install JAX, Flax and the other dependencies: ```bash git clone https://github.com/huggingface/community-events cd community-events/jax-controlnet-sprint/training_scripts pip install -U -r requirements_flax.txt ``` To verify that JAX was correctly installed, you can run the following command: ```python import jax jax.device_count() ``` This should display the number of TPU cores, which should be 4 on a TPUv4-8 VM. If Python is not able to detect the TPU device, please take a look at [this section](#troubleshoot-your-tpu-vm) for solutions. If you want to use Weights and Biases logging, you should also install `wandb` now: ```bash pip install wandb ``` > 💡 Note: Weights & Biases is free for students, educators, and academic researchers. All participants of our event are qualified to get an academic Weights & Biases team account. To create your team, you can visit https://wandb.ai/create-team and choose the team type to be "Academic". For more information regarding creation and management of Weights & Biases team, you can checkout https://docs.wandb.ai/guides/app/features/teams. ### Running the training script Now let's download two conditioning images that we will use to run validation during the training in order to track our progress ```bash wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` We encourage you to store or share your model with the community. To use Hugging Face hub, please login to your Hugging Face account, or ([create one](https://huggingface.co/docs/diffusers/main/en/training/hf.co/join) if you don’t have one already): ```bash huggingface-cli login ``` Make sure you have the `MODEL_DIR`,`OUTPUT_DIR` and `HUB_MODEL_ID` environment variables set. 
The `OUTPUT_DIR` and `HUB_MODEL_ID` variables specify where to save the model on the Hub: ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="runs/fill-circle-{timestamp}" export HUB_MODEL_ID="controlnet-fill-circle" ``` And finally start the training (make sure you're in the `jax-controlnet-sprint/training_scripts` directory)! ```bash python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --validation_steps=1000 \ --train_batch_size=2 \ --revision="non-ema" \ --from_pt \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID \ --num_train_epochs=11 \ --push_to_hub \ --hub_model_id=$HUB_MODEL_ID ``` Note that the `--from_pt` argument will convert your PyTorch checkpoint into Flax. However, it will only work with checkpoints in diffusers format. If your `MODEL_DIR` does not contain checkpoints in diffusers format, you cannot use the `--from_pt` argument. You can convert your `ckpt` or `safetensors` checkpoints into diffusers format using [this script](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py). Since we passed the `--push_to_hub` flag, it will automatically create a model repo under your Hugging Face account based on `$HUB_MODEL_ID`. By the end of training, the final checkpoint will be automatically stored on the Hub. You can find an example model repo [here](https://huggingface.co/YiYiXu/fill-circle-controlnet). Our training script also provides limited support for streaming large datasets from the Hugging Face Hub. In order to enable streaming, one must also set `--max_train_samples`. Here is an example command (from [this blog article](https://huggingface.co/blog/train-your-controlnet)): ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="runs/uncanny-faces-{timestamp}" export HUB_MODEL_ID="controlnet-uncanny-faces" python3 train_controlnet_flax.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=multimodalart/facesyntheticsspigacaptioned \ --streaming \ --conditioning_image_column=spiga_seg \ --image_column=image \ --caption_column=image_caption \ --resolution=512 \ --max_train_samples 100000 \ --learning_rate=1e-5 \ --train_batch_size=1 \ --revision="flax" \ --report_to="wandb" \ --tracker_project_name=$HUB_MODEL_ID ``` Note, however, that the performance of the TPUs might get bottlenecked as streaming with `datasets` is not optimized for images. To ensure maximum throughput, we encourage you to explore the following options: * [Webdataset](https://webdataset.github.io/webdataset/) * [TorchData](https://github.com/pytorch/data) * [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds) When working with a larger dataset, you may need to run the training process for a long time, and it's useful to save regular checkpoints during the process. You can use the following argument to enable intermediate checkpointing: ```bash --checkpointing_steps=500 ``` This will save the trained model in subfolders of your `output_dir`. 
Each subfolder is named after the number of steps performed so far; for example, a checkpoint saved after 500 training steps would be saved in a subfolder named `500`. You can then start your training from this saved checkpoint with ```bash --controlnet_model_name_or_path="./control_out/500" ``` We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) which helps to achieve faster convergence by rebalancing the loss. To use it, one needs to set the `--snr_gamma` argument. The recommended value when using it is `5.0`. We also support gradient accumulation - it is a technique that lets you use a bigger batch size than your machine would normally be able to fit into memory. You can use the `gradient_accumulation_steps` argument to set the number of gradient accumulation steps. The ControlNet author recommends using gradient accumulation to achieve better convergence. Read more [here](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation). You can **profile your code** with: ```bash --profile_steps=5 ``` Refer to the [JAX documentation on profiling](https://jax.readthedocs.io/en/latest/profiling.html). To inspect the profile trace, you'll have to install and start Tensorboard with the profile plugin: ```bash pip install tensorflow tensorboard-plugin-profile tensorboard --logdir runs/fill-circle-100steps-20230411_165612/ ``` The profile can then be inspected at http://localhost:6006/#profile Sometimes you'll get version conflicts (error messages like `Duplicate plugins for name projector`), which means that you have to uninstall and reinstall all versions of Tensorflow/Tensorboard (e.g. with `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`). Note that the debugging functionality of the Tensorboard `profile` plugin is still under active development. Not all views are fully functional, and for example the `trace_viewer` cuts off events after 1M (which can result in all your device traces getting lost if you, for example, profile the compilation step by accident). ### Troubleshoot your TPU VM **VERY IMPORTANT** - Only one process can access the TPU cores at a time. This means that if multiple team members are trying to connect to the TPU cores, you will get errors such as: ``` libtpu.so already in used by another process. Not attempting to load libtpu.so in this process. ``` We recommend that every team member create their own virtual environment, but only one person should run the heavy training processes. Also, please take turns when setting up the TPUv4-8 so that everybody can verify that JAX is correctly installed. If your team members are not currently using the TPU but you still get this error message, you should kill the process that is using the TPU with ``` kill -9 PID ``` You will need to replace "PID" with the PID of the process that is using the TPU. In most cases, this information is included in the error message. For example, if you get ``` The TPU is already in use by a process with pid 1378725. Not attempting to load libtpu.so in this process. ``` you can do ``` kill -9 1378725 ``` You can also use the command below to find the processes using each of the TPU chips (e.g. 
`/dev/accel0` is one of the TPU chips): ``` sudo lsof -w /dev/accel0 ``` To kill all the processes using `/dev/accel0`: ``` sudo lsof -t /dev/accel0 | xargs kill -9 ``` If Python is not able to detect your TPU device (i.e. when you do `jax.device_count()` and it outputs `0`), it might be because you have no rights to access the TPU logs, or you have a dangling TPU lock file. Running these commands usually fixes the issue: ``` sudo rm -f /tmp/libtpu_lockfile ``` ``` sudo chmod o+w /tmp/tpu_logs/ ``` <div id="how-to-make-a-submission"> <h2> How to Make a Submission </h2> </div> To make a full submission, you need to have the following on Hugging Face Hub: - Model repository with model weights and model card, - (Optional) Dataset repository with dataset card, - A Hugging Face Space that lets others interact with your model. ### Pushing model weights and the model card to Hub **If you are using the training script (`train_controlnet_flax.py`) provided in this directory** Enabling the `push_to_hub` argument in the training arguments will: - Create a model repository locally, and remotely on Hugging Face Hub, - Create a model card and write it to the local model repository, - Save your model to the local model repository, - Push the local repository to Hugging Face Hub. Your automatically generated model card will look like below 👇 ![Model Card](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/jax_model_card.png). You can edit the model card to be more informative. Model cards that are more informative than the others will carry more weight during evaluation. **If you have trained a custom model and not used the script** You need to authenticate yourself with `huggingface-cli login` as instructed above. If you are using one of the available model classes from `diffusers`, save your model with the `save_pretrained` method of your model. ```python model.save_pretrained("path_to_your_model_repository") ``` After saving your model to a folder, you can simply use the script below to push your model to the Hub 👇 ```python from huggingface_hub import create_repo, upload_folder create_repo("username/my-awesome-model") upload_folder( folder_path="path_to_your_model_repository", repo_id="username/my-awesome-model" ) ``` This will push your model to the Hub. After pushing your model to the Hub, you need to create the model card yourself. You can use the graphical interface to edit the model card. ![Edit Model Card](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/edit_model_card.png) Every model card consists of two sections, metadata and free text. You can edit the metadata from the sections in the graphical UI. If you have saved your model using `save_pretrained`, you do not need to provide `pipeline_tag` and `library_name`. If not, provide `pipeline_tag`, `library_name` and the dataset if it exists on Hugging Face Hub. Aside from these, you need to add `jax-diffusers-event` to the `tags` section. ``` --- license: apache-2.0 library_name: diffusers tags: - jax-diffusers-event datasets: - red_caps pipeline_tag: text-to-image --- ``` ![Edit Metadata](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/edit_metadata.png) ### Creating our Space <h4> Writing our Application </h4> We will use [Gradio](https://gradio.app/) to build our applications. Gradio has two main APIs: `Interface` and `Blocks`. 
`Interface` is a high-level API that lets you create an interface with few lines of code, and `Blocks` is a lower-level API that gives you more flexibility over interfaces you can build. The code should be included in a file called `app.py`. Let's try to create a ControlNet app as an example. The `Interface` API simply works like below 👇 ```python import gradio as gr # inference function takes prompt, negative prompt and image def infer(prompt, negative_prompt, image): # implement your inference function here return output_image # you need to pass inputs and outputs according to inference function gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image").launch() ``` You can customize your interface by passing `title`, `description` and `examples` to the `Interface` function. ```python title = "ControlNet on Canny Filter" description = "This is a demo on ControlNet based on canny filter." # you need to pass your examples according to your inputs # each inner list is one example, each element in the list corresponding to a component in the `inputs`. examples = [["a cat with cake texture", "low quality", "cat_image.png"]] gr.Interface(fn = infer, inputs = ["text", "text", "image"], outputs = "image", title = title, description = description, examples = examples, theme='gradio/soft').launch() ``` Your interface will look like below 👇 ![ControlNet](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio_controlnet.png) With Blocks, you can add markdown, tabs, components under columns and rows and more. Assume we have two ControlNets and we want to include them in one Space. We will have them under different tabs under one demo like below 👇 ```python import gradio as gr def infer_segmentation(prompt, negative_prompt, image): # your inference function for segmentation control return im def infer_canny(prompt, negative_prompt, image): # your inference function for canny control return im with gr.Blocks(theme='gradio/soft') as demo: gr.Markdown("## Stable Diffusion with Different Controls") gr.Markdown("In this app, you can find different ControlNets with different filters. ") with gr.Tab("ControlNet on Canny Filter "): prompt_input_canny = gr.Textbox(label="Prompt") negative_prompt_canny = gr.Textbox(label="Negative Prompt") canny_input = gr.Image(label="Input Image") canny_output = gr.Image(label="Output Image") submit_btn = gr.Button(value = "Submit") canny_inputs = [prompt_input_canny, negative_prompt_canny, canny_input] submit_btn.click(fn=infer_canny, inputs=canny_inputs, outputs=[canny_output]) with gr.Tab("ControlNet with Semantic Segmentation"): prompt_input_seg = gr.Textbox(label="Prompt") negative_prompt_seg = gr.Textbox(label="Negative Prompt") seg_input = gr.Image(label="Image") seg_output = gr.Image(label="Output Image") submit_btn = gr.Button(value = "Submit") seg_inputs = [prompt_input_seg, negative_prompt_seg, seg_input] submit_btn.click(fn=infer_segmentation, inputs=seg_inputs, outputs=[seg_output]) demo.launch() ``` Above demo will look like below 👇 ![Gradio Blocks](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio_controlnet_blocks.png) #### Creating our Space After our application is written, we can create a Hugging Face Space to host our app. You can go to [huggingface.co](http://huggingface.co), click on your profile on top right and select “New Space”. 
![New Space](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_space.png) We can name our Space, pick a license and select Space SDK as "Gradio". ![Space Configuration](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/space_config.png) After creating the Space, you can either use the instructions below to clone the repository locally, add your files and push, or use the graphical interface to create the files and write the code in the browser. ![Spaces Landing](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/repository_landing.png) To upload your application file, pick "Add File" and drag and drop your file. ![New Space Landing](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/add_file.png) Lastly, we have to create a file called `requirements.txt` and add the requirements of our project. Make sure to install the versions of JAX, Diffusers and the other dependencies shown below. ``` -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html jax[cuda11_cudnn805] jaxlib git+https://github.com/huggingface/diffusers@main opencv-python transformers flax ``` We will give you a GPU grant so your application can run on a GPU. We have a leaderboard hosted [here](https://huggingface.co/spaces/jax-diffusers-event/leaderboard) and we will be distributing prizes from this leaderboard. To make your Space show up on the leaderboard, simply edit the `README.md` of your Space to have the tag `jax-diffusers-event` under `tags` like below 👇 ``` --- title: Canny Coyo1m emoji: 💜 ... tags: - jax-diffusers-event --- ``` ## Prizes For this sprint we will have many prizes. We will pick the first ten projects from [this leaderboard](https://huggingface.co/spaces/jax-diffusers-event/leaderboard), so you should tag your Space for the leaderboard to make your submission complete, as instructed in the above section. The projects are ranked by likes, so we will amplify the visibility of all projects for people to cast their votes by leaving a like on the Space. We will pick the first ten projects from the ranking, and the jury will cast their votes to determine the first three places. These projects will be highlighted by both Google and Hugging Face. Elaborately made interfaces as well as projects with open-sourced codebases and models will likely increase the chance of winning prizes. Prizes are as follows and are given to each team member 👇 **First Place**: A voucher of $150 that you can spend at the [Hugging Face Store](https://store.huggingface.co/), a Hugging Face Hub PRO subscription for one year, and the Natural Language Processing with Transformers book **Second Place**: A voucher of $125 that you can spend at the [Hugging Face Store](https://store.huggingface.co/), and a Hugging Face Hub PRO subscription for one year **Third Place**: A voucher of $100 that you can spend at the [Hugging Face Store](https://store.huggingface.co/), and a Hugging Face Hub PRO subscription for one year The first ten projects on the leaderboard (regardless of the jury's decision) will win a merch set exclusively made for this sprint by Hugging Face, and a separate JAX merch set from Google. ## Jury Our jury panel for this sprint included: 1. Robin Rombach, Stability AI 2. Huiwen Chang, Google Research 3. Jun-Yan Zhu, Carnegie Mellon University 4. Merve Noyan, Hugging Face ## FAQ In this section, we are collecting answers to frequently asked questions from our Discord channel. Contributions welcome! ### How to Use VSCode with TPU VM? 
You can follow this [general guide](https://medium.com/@ivanzhd/vscode-sftp-connection-to-compute-engine-on-google-cloud-platform-gcloud-9312797d56eb) on how to use VSCode remote to connect to Google Cloud VMs. Once it's set up, you can develop on the TPU VM using VSCode. To get your external IP, use this command: ``` gcloud compute tpus tpu-vm describe <node_name> --zone=<zone> ``` It should be listed under 'accessConfig' -> 'externalIp'. ### How to Test Your Code Locally? Since team members are sharing the TPU VM, it might be practical to write and test your code locally on a CPU while your teammates are running the training process on the VM. To run local testing, it is important to set the `xla_force_host_platform_device_count` flag to `4` (a short example is included at the end of this document). Read more on the [documentation](https://jax.readthedocs.io/en/latest/jax-101/06-parallelism.html#aside-hosts-and-devices-in-jax). ## Sprint winners The top 10 projects (based on the likes on their demo applications) are available on this [leaderboard](https://huggingface.co/spaces/jax-diffusers-event/leaderboard). We took this leaderboard to our [jury](#jury) to judge the top 10 projects based on several factors such as open-source model checkpoints, datasets, and codebases, completeness of the model and dataset cards, etc. As a result, the following three projects emerged as the winners: 1. [ControlNet for Interior Design](https://huggingface.co/spaces/controlnet-interior-design/controlnet-seg) 2. [ControlNet for Adjusting Brightness](https://huggingface.co/spaces/ioclab/brightness-controlnet) 3. [Stable Diffusion with Hand Control](https://huggingface.co/spaces/vllab/controlnet-hands)
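For reference, here is a minimal sketch (our own illustration, not part of the official training scripts) of how the local-testing flag mentioned in the FAQ can be set from Python. The key point is that `XLA_FLAGS` must be set before JAX is imported:

```python
import os

# Emulate 4 XLA devices on the host CPU so pmap/sharded code paths can be exercised locally.
os.environ["XLA_FLAGS"] = (
    os.environ.get("XLA_FLAGS", "") + " --xla_force_host_platform_device_count=4"
)

import jax

print(jax.devices())       # four CPU devices
print(jax.device_count())  # 4
```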
diffusion-models-class/units/en/events/4.mdx/0
{ "file_path": "diffusion-models-class/units/en/events/4.mdx", "repo_id": "diffusion-models-class", "token_count": 11592 }
150
import wandb import numpy as np import torch, torchvision import torch.nn.functional as F from PIL import Image from tqdm.auto import tqdm from fastcore.script import call_parse from torchvision import transforms from diffusers import DDPMPipeline from diffusers import DDIMScheduler from datasets import load_dataset from matplotlib import pyplot as plt @call_parse def train( image_size = 256, batch_size = 16, grad_accumulation_steps = 2, num_epochs = 1, start_model = "google/ddpm-bedroom-256", dataset_name = "huggan/wikiart", device='cuda', model_save_name='wikiart_1e', wandb_project='dm_finetune', log_samples_every = 250, save_model_every = 2500, ): # Wandb pour l'enregistrement wandb.init(project=wandb_project, config=locals()) # Préparer le modèle pré-entraîné image_pipe = DDPMPipeline.from_pretrained(start_model); image_pipe.to(device) # Obtenir un planificateur pour l'échantillonnage sampling_scheduler = DDIMScheduler.from_config(start_model) sampling_scheduler.set_timesteps(num_inference_steps=50) # Préparer le jeu de données dataset = load_dataset(dataset_name, split="train") preprocess = transforms.Compose( [ transforms.Resize((image_size, image_size)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def transform(examples): images = [preprocess(image.convert("RGB")) for image in examples["image"]] return {"images": images} dataset.set_transform(transform) train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True) # Optimiseur et planificateur du taux d'apprentissage optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=1e-5) scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9) for epoch in range(num_epochs): for step, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader)): # Obtenir des images propres clean_images = batch['images'].to(device) # Exemple de bruit à ajouter aux images noise = torch.randn(clean_images.shape).to(clean_images.device) bs = clean_images.shape[0] # Échantillonner un pas de temps aléatoire pour chaque image timesteps = torch.randint(0, image_pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device).long() # Ajouter du bruit aux images propres en fonction de la grandeur du bruit à chaque étape # (il s'agit du processus de diffusion vers l'avant). 
noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps) # Obtenir la prédiction du modèle pour le bruit noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Comparez la prédiction avec le bruit réel loss = F.mse_loss(noise_pred, noise) # Log la perte wandb.log({'loss':loss.item()}) # Calculer les gradients loss.backward() # Accumulation du gradient : mettre à jour seulement tous les grad_accumulation_steps if (step+1)%grad_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() # Enregistrer occasionnellement des échantillons if (step+1)%log_samples_every == 0: x = torch.randn(8, 3, 256, 256).to(device) # Batch de 8 for i, t in tqdm(enumerate(sampling_scheduler.timesteps)): model_input = sampling_scheduler.scale_model_input(x, t) with torch.no_grad(): noise_pred = image_pipe.unet(model_input, t)["sample"] x = sampling_scheduler.step(noise_pred, t, x).prev_sample grid = torchvision.utils.make_grid(x, nrow=4) im = grid.permute(1, 2, 0).cpu().clip(-1, 1)*0.5 + 0.5 im = Image.fromarray(np.array(im*255).astype(np.uint8)) wandb.log({'Sample generations': wandb.Image(im)}) # Sauvegarde occasionnelle du modèle if (step+1)%save_model_every == 0: image_pipe.save_pretrained(model_save_name+f'step_{step+1}') # Mise à jour du taux d'apprentissage pour l'époque suivante scheduler.step() # Sauvegarder le pipeline une dernière fois image_pipe.save_pretrained(model_save_name) # Terminer l'éxécution wandb.finish()
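# Example invocation (hypothetical values; fastcore's @call_parse exposes the
# function arguments above as command-line flags, falling back to the defaults
# in the signature when a flag is omitted):
#
#   python finetune_model.py --image_size 256 --batch_size 16 --num_epochs 1 \
#       --dataset_name "huggan/wikiart" --model_save_name wikiart_1e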
diffusion-models-class/units/fr/unit2/finetune_model.py/0
{ "file_path": "diffusion-models-class/units/fr/unit2/finetune_model.py", "repo_id": "diffusion-models-class", "token_count": 2153 }
151
<jupyter_start><jupyter_text>Diffusion pour l'audio Dans ce *notebook*, nous allons jeter un bref coup d'œil à la génération d'audio avec des modèles de diffusion.Ce que vous allez apprendre :- Comment l'audio est représenté dans un ordinateur- Les méthodes de conversion entre les données audio brutes et les spectrogrammes- Comment préparer un chargeur de données avec une fonction personnalisée pour convertir des tranches d'audio en spectrogrammes- *Finetuner* un modèle de diffusion audio existant sur un genre de musique spécifique- Télécharger votre pipeline personnalisé sur le Hub d'Hugging FaceMise en garde : il s'agit principalement d'un objectif pédagogique - rien ne garantit que notre modèle sonnera bien 😉Commençons ! Configuration et importations<jupyter_code># !pip install -q datasets diffusers torchaudio accelerate import torch, random import numpy as np import torch.nn.functional as F from tqdm.auto import tqdm from IPython.display import Audio from matplotlib import pyplot as plt from diffusers import DiffusionPipeline from torchaudio import transforms as AT from torchvision import transforms as IT<jupyter_output><empty_output><jupyter_text>Echantillonnage à partir d'un pipeline audio pré-entraînéCommençons par suivre la [documentation](https://huggingface.co/docs/diffusers/api/pipelines/audio_diffusion) pour charger un modèle de diffusion audio préexistant :<jupyter_code># Chargement d'un pipeline de diffusion audio pré-entraîné device = "cuda" if torch.cuda.is_available() else "cpu" pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-instrumental-hiphop-256").to(device)<jupyter_output><empty_output><jupyter_text>Comme pour les pipelines que nous avons utilisés dans les unités précédentes, nous pouvons créer des échantillons en appelant le pipeline comme suit :<jupyter_code># Échantillonner à partir du pipeline et afficher les résultats output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate()))<jupyter_output><empty_output><jupyter_text>Ici, l'argument `rate` spécifie la fréquence d'échantillonnage de l'audio ; nous y reviendrons plus tard. Vous remarquerez également que le pipeline renvoie plusieurs choses. Que se passe-t-il ici ? Examinons de plus près les deux sorties.La première est un tableau de données, représentant l'audio généré :<jupyter_code># Le tableau audio output.audios[0].shape<jupyter_output><empty_output><jupyter_text>La seconde ressemble à une image en niveaux de gris :<jupyter_code># L'image de sortie (spectrogramme) output.images[0].size<jupyter_output><empty_output><jupyter_text>Cela nous donne un aperçu du fonctionnement de ce pipeline. L'audio n'est pas directement généré par diffusion. Au lieu de cela, le pipeline a le même type d'UNet 2D que les pipelines de génération d'images inconditionnelles que nous avons vus dans l'unité 1, qui est utilisé pour générer le spectrogramme, qui est ensuite post-traité dans l'audio final.Le pipeline possède un composant supplémentaire qui gère ces conversions, auquel nous pouvons accéder via `pipe.mel` :<jupyter_code>pipe.mel<jupyter_output><empty_output><jupyter_text>De l'audio à l'image et inversementUne "forme d'onde" encode les échantillons audio bruts dans le temps. Il peut s'agir du signal électrique reçu d'un microphone, par exemple. Travailler avec cette représentation du "domaine temporel" peut s'avérer délicat, c'est pourquoi il est courant de la convertir sous une autre forme, communément appelée spectrogramme. 
Un spectrogramme montre l'intensité de différentes fréquences (axe y) en fonction du temps (axe x) :<jupyter_code># Calculer et afficher un spectrogramme pour notre échantillon audio généré en utilisant torchaudio spec_transform = AT.Spectrogram(power=2) spectrogram = spec_transform(torch.tensor(output.audios[0])) print(spectrogram.min(), spectrogram.max()) log_spectrogram = spectrogram.log() plt.imshow(log_spectrogram[0], cmap='gray');<jupyter_output>tensor(0.) tensor(6.0842)<jupyter_text>Le spectrogramme que nous venons de créer contient des valeurs comprises entre 0,0000000000001 et 1, la plupart d'entre elles étant proches de la limite inférieure de cette plage. Ce n'est pas l'idéal pour la visualisation ou la modélisation. En fait, nous avons dû prendre le logarithme de ces valeurs pour obtenir un tracé en niveaux de gris qui montre des détails. Pour cette raison, nous utilisons généralement un type spécial de spectrogramme appelé Mel spectrogramme, qui est conçu pour capturer les types d'informations qui sont importantes pour l'audition humaine en appliquant certaines transformations aux différentes composantes de fréquence du signal. *Quelques transformations audio de la documentation [torchaudio](https://pytorch.org/audio/stable/transforms.html)* Heureusement pour nous, nous n'avons pas besoin de nous préoccuper de ces transformations, la fonctionnalité `mel` du pipeline s'occupe de ces détails pour nous. En l'utilisant, nous pouvons convertir une image de spectrogramme en audio comme suit :<jupyter_code>a = pipe.mel.image_to_audio(output.images[0]) a.shape<jupyter_output><empty_output><jupyter_text>Nous pouvons également convertir un tableau de données audio en images de spectrogramme en chargeant d'abord les données audio brutes, puis en appelant la fonction `audio_slice_to_image()`. Les clips plus longs sont automatiquement découpés en morceaux de la bonne longueur pour produire une image de spectrogramme de 256x256 :<jupyter_code>pipe.mel.load_audio(raw_audio=a) im = pipe.mel.audio_slice_to_image(0) im<jupyter_output><empty_output><jupyter_text>L'audio est représenté sous la forme d'un long tableau de nombres. Pour l'écouter nous avons besoin d'une autre information clé : la fréquence d'échantillonnage. 
Combien d'échantillons (valeurs individuelles) utilisons-nous pour représenter une seconde d'audio ?Nous pouvons voir la fréquence d'échantillonnage utilisée lors de l'entraînement de ce pipeline avec :<jupyter_code>sample_rate_pipeline = pipe.mel.get_sample_rate() sample_rate_pipeline<jupyter_output><empty_output><jupyter_text>Si nous spécifions mal la fréquence d'échantillonnage, nous obtenons un son accéléré ou ralenti :<jupyter_code>display(Audio(output.audios[0], rate=44100)) # Vitesse x2<jupyter_output><empty_output><jupyter_text>*Finetuning* du pipelineMaintenant que nous avons une compréhension approximative du fonctionnement du pipeline, nous allons le *finetuner* sur de nouvelles données audio !Le jeu de données est une collection de clips audio de différents genres, que nous pouvons charger depuis le Hub de la manière suivante :<jupyter_code>from datasets import load_dataset dataset = load_dataset('lewtun/music_genres', split='train') dataset<jupyter_output>Using custom data configuration lewtun--music_genres-2cfa9201f94788d8 Found cached dataset parquet (/home/ubuntu/.cache/huggingface/datasets/lewtun___parquet/lewtun--music_genres-2cfa9201f94788d8/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>Vous pouvez utiliser le code ci-dessous pour voir les différents genres dans le jeu de données et combien d'échantillons sont contenus dans chacun d'eux :<jupyter_code>for g in list(set(dataset['genre'])): print(g, sum(x==g for x in dataset['genre']))<jupyter_output>Pop 945 Blues 58 Punk 2582 Old-Time / Historic 408 Experimental 1800 Folk 1214 Electronic 3071 Spoken 94 Classical 495 Country 142 Instrumental 1044 Chiptune / Glitch 1181 International 814 Ambient Electronic 796 Jazz 306 Soul-RnB 94 Hip-Hop 1757 Easy Listening 13 Rock 3095<jupyter_text>Le jeu de données contient les données audio sous forme de tableaux :<jupyter_code>audio_array = dataset[0]['audio']['array'] sample_rate_dataset = dataset[0]['audio']['sampling_rate'] print('Audio array shape:', audio_array.shape) print('Sample rate:', sample_rate_dataset) display(Audio(audio_array, rate=sample_rate_dataset))<jupyter_output>Audio array shape: (1323119,) Sample rate: 44100<jupyter_text>Notez que la fréquence d'échantillonnage de cet audio est plus élevée. Si nous voulons utiliser le pipeline existant, nous devrons le "rééchantillonner" pour qu'il corresponde à la fréquence d'échantillonnage. Les clips sont également plus longs que ceux pour lesquels le pipeline est configuré. Heureusement, lorsque nous chargeons l'audio à l'aide de `pipe.mel`, il découpe automatiquement le clip en sections plus petites :<jupyter_code>a = dataset[0]['audio']['array'] # Obtenir le tableau audio pipe.mel.load_audio(raw_audio=a) # Le charger avec pipe.mel pipe.mel.audio_slice_to_image(0) # Visualiser la première "tranche" sous forme de spectrogramme<jupyter_output><empty_output><jupyter_text>Nous devons penser à ajuster le taux d'échantillonnage, car les données de ce jeu de données comportent deux fois plus d'échantillons par seconde :<jupyter_code>sample_rate_dataset = dataset[0]['audio']['sampling_rate'] sample_rate_dataset<jupyter_output><empty_output><jupyter_text>Ici, nous utilisons les transformations de torchaudio (importées sous le nom AT) pour effectuer le rééchantillonnage, le pipeline `mel` pour transformer l'audio en image et les transformations de torchvision (importées sous le nom IT) pour transformer les images en tenseurs. 
Nous obtenons ainsi une fonction qui transforme un clip audio en un tenseur de spectrogramme que nous pouvons utiliser pour nous entraîner :<jupyter_code>resampler = AT.Resample(sample_rate_dataset, sample_rate_pipeline, dtype=torch.float32) to_t = IT.ToTensor() def to_image(audio_array): audio_tensor = torch.tensor(audio_array).to(torch.float32) audio_tensor = resampler(audio_tensor) pipe.mel.load_audio(raw_audio=np.array(audio_tensor)) num_slices = pipe.mel.get_number_of_slices() slice_idx = random.randint(0, num_slices-1) # Piocher une tranche aléatoire à chaque fois (à l'exception de la dernière tranche courte) im = pipe.mel.audio_slice_to_image(slice_idx) return im<jupyter_output><empty_output><jupyter_text>Nous utiliserons notre fonction `to_image()` dans le cadre d'une fonction collate personnalisée pour transformer notre jeu de données en un chargeur de données utilisable pour l'entraînement. La fonction collate définit la manière de transformer un batch d'exemples du jeu de données en un batch final de données prêtes à être entraînées. Dans ce cas, nous transformons chaque échantillon audio en une image de spectrogramme et nous empilons les tenseurs résultants :<jupyter_code>def collate_fn(examples): # vers l'image -> vers le tenseur -> redimensionnement vers (-1, 1) -> empiler dans le batch audio_ims = [to_t(to_image(x['audio']['array']))*2-1 for x in examples] return torch.stack(audio_ims) # Créer un jeu de données avec uniquement le genre de chansons 'Chiptune / Glitch' batch_size=4 # 4 sur Colab, 12 sur A100 chosen_genre = 'Electronic' # <<< Essayer d'entraîner sur des genres différents <<< indexes = [i for i, g in enumerate(dataset['genre']) if g == chosen_genre] filtered_dataset = dataset.select(indexes) dl = torch.utils.data.DataLoader(filtered_dataset.shuffle(), batch_size=batch_size, collate_fn=collate_fn, shuffle=True) batch = next(iter(dl)) print(batch.shape)<jupyter_output>torch.Size([4, 1, 256, 256])<jupyter_text>**NB : Vous devrez utiliser une taille de batch inférieure (par exemple 4) à moins que vous ne disposiez d'une grande quantité de vRAM GPU.** Boucle d'entraînementVoici une boucle d'entraînement simple qui s'exécute à travers le chargeur de données pour quelques époques afin de *finetuner* le pipeline UNet. 
Vous pouvez également ignorer cette cellule et charger le pipeline avec le code de la cellule suivante.<jupyter_code>epochs = 3 lr = 1e-4 pipe.unet.train() pipe.scheduler.set_timesteps(1000) optimizer = torch.optim.AdamW(pipe.unet.parameters(), lr=lr) for epoch in range(epochs): for step, batch in tqdm(enumerate(dl), total=len(dl)): # Préparer les images d'entrée clean_images = batch.to(device) bs = clean_images.shape[0] # Échantillonner un pas de temps aléatoire pour chaque image timesteps = torch.randint( 0, pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device ).long() # Ajouter du bruit aux images propres en fonction de l'ampleur du bruit à chaque étape noise = torch.randn(clean_images.shape).to(clean_images.device) noisy_images = pipe.scheduler.add_noise(clean_images, noise, timesteps) # Obtenir la prédiction du modèle noise_pred = pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Calculer la perte loss = F.mse_loss(noise_pred, noise) loss.backward(loss) # Mise à jour des paramètres du modèle à l'aide de l'optimiseur optimizer.step() optimizer.zero_grad() # OU : Charger la version entraînée précédemment pipe = DiffusionPipeline.from_pretrained("johnowhitaker/Electronic_test").to(device) output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=22050)) # Créer un échantillon plus long en passant un tenseur de bruit de départ avec une forme différente noise = torch.randn(1, 1, pipe.unet.sample_size[0],pipe.unet.sample_size[1]*4).to(device) output = pipe(noise=noise) display(output.images[0]) display(Audio(output.audios[0], rate=22050))<jupyter_output><empty_output><jupyter_text>Ce ne sont pas les résultats les plus impressionnants mais c'est un début :) Essayez d'ajuster le taux d'apprentissage et le nombre d'époques, et partagez vos meilleurs résultats sur Discord pour que nous puissions nous améliorer ensemble ! Quelques éléments à prendre en compte- Nous travaillons avec des images de spectrogrammes carrés de 256 pixels ce qui limite la taille de nos batchs. Pouvez-vous récupérer de l'audio de qualité suffisante à partir d'un spectrogramme de 128x128 ?- Au lieu d'une augmentation aléatoire de l'image, nous choisissons à chaque fois des tranches différentes du clip audio, mais cela pourrait-il être amélioré avec différents types d'augmentation lorsque l'on s'entraîne pendant de nombreuses époques ?- Comment pourrions-nous utiliser cette méthode pour générer des clips plus longs ? Peut-être pourriez-vous générer un clip de départ de 5 secondes, puis utiliser des idées inspirées de l'inpainting pour continuer à générer des segments audio supplémentaires à partir du clip initial...- Quel est l'équivalent d'une image à image dans ce contexte de diffusion de spectrogrammes ? 
Pousser sur le HubUne fois que vous êtes satisfait de votre modèle, vous pouvez le sauvegarder et le transférer sur le Hub pour que d'autres personnes puissent en profiter :<jupyter_code>from huggingface_hub import get_full_repo_name, HfApi, create_repo, ModelCard # Choisir un nom pour le modèle model_name = "audio-diffusion-electronic" hub_model_id = get_full_repo_name(model_name) # Sauvegarder le pipeline localement pipe.save_pretrained(model_name) # Inspecter le contenu du dossier !ls {model_name} # Créer un dépôt create_repo(hub_model_id) # Télécharger les fichiers api = HfApi() api.upload_folder( folder_path=f"{model_name}/scheduler", path_in_repo="scheduler", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{model_name}/mel", path_in_repo="mel", repo_id=hub_model_id ) api.upload_folder(folder_path=f"{model_name}/unet", path_in_repo="unet", repo_id=hub_model_id) api.upload_file( path_or_fileobj=f"{model_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) # Pousser une carte de modèle content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-audio-generation - diffusion-models-class --- # Model Card for Unit 4 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional audio generation of music in the genre {chosen_genre} ## Usage ```python from IPython.display import Audio from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("{hub_model_id}") output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output>
diffusion-models-class/units/fr/unit4/diffusion_for_audio.ipynb/0
{ "file_path": "diffusion-models-class/units/fr/unit4/diffusion_for_audio.ipynb", "repo_id": "diffusion-models-class", "token_count": 5905 }
152
<jupyter_start><jupyter_text>Manipulation de plusieurs séquences (TensorFlow) Installez la bibliothèque 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install transformers[sentencepiece] import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "J'ai attendu un cours d’HuggingFace toute ma vie." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = tf.constant(ids) # Cette ligne va échouer model(input_ids) tokenized_inputs = tokenizer(sequence, return_tensors="tf") print(tokenized_inputs["input_ids"]) import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "J'ai attendu un cours d’HuggingFace toute ma vie." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = tf.constant([ids]) print("Input IDs:", input_ids) output = model(input_ids) print("Logits:", output.logits) batched_ids = [ [200, 200, 200], [200, 200] ] padding_id = 100 batched_ids = [ [200, 200, 200], [200, 200, padding_id], ] model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence1_ids = [[200, 200, 200]] sequence2_ids = [[200, 200]] batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] print(model(tf.constant(sequence1_ids)).logits) print(model(tf.constant(sequence2_ids)).logits) print(model(tf.constant(batched_ids)).logits) batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] attention_mask = [ [1, 1, 1], [1, 1, 0], ] outputs = model(tf.constant(batched_ids), attention_mask=tf.constant(attention_mask)) print(outputs.logits) # max_sequence_length = 512 equence = sequence[:max_sequence_length]<jupyter_output><empty_output>
notebooks/course/fr/chapter2/section5_tf.ipynb/0
{ "file_path": "notebooks/course/fr/chapter2/section5_tf.ipynb", "repo_id": "notebooks", "token_count": 810 }
153
<jupyter_start><jupyter_text>Données massives ? 🤗 Datasets à la rescousse ! Installez les bibliothèques 🤗 Transformers et 🤗 Datasets pour exécuter ce *notebook*.<jupyter_code>!pip install datasets evaluate transformers[sentencepiece] !pip install zstandard from datasets import load_dataset # Cela prend quelques minutes à exécuter, alors allez prendre un thé ou un café en attendant :) data_files = "https://the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst" pubmed_dataset = load_dataset("json", data_files=data_files, split="train") pubmed_dataset pubmed_dataset[0] !pip install psutil import psutil # Process.memory_info est exprimé en octets, donc convertir en mégaoctets print(f"RAM used: {psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB") print(f"Number of files in dataset : {pubmed_dataset.dataset_size}") size_gb = pubmed_dataset.dataset_size / (1024**3) print(f"Dataset size (cache file) : {size_gb:.2f} GB") import timeit code_snippet = """batch_size = 1000 for idx in range(0, len(pubmed_dataset), batch_size): _ = pubmed_dataset[idx:idx + batch_size] """ time = timeit.timeit(stmt=code_snippet, number=1, globals=globals()) print( f"Iterated over {len(pubmed_dataset)} examples (about {size_gb:.1f} GB) in " f"{time:.1f}s, i.e. {size_gb/time:.3f} GB/s" ) pubmed_dataset_streamed = load_dataset( "json", data_files=data_files, split="train", streaming=True ) next(iter(pubmed_dataset_streamed)) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") tokenized_dataset = pubmed_dataset_streamed.map(lambda x: tokenizer(x["text"])) next(iter(tokenized_dataset)) shuffled_dataset = pubmed_dataset_streamed.shuffle(buffer_size=10_000, seed=42) next(iter(shuffled_dataset)) dataset_head = pubmed_dataset_streamed.take(5) list(dataset_head) # Ignorer les 1 000 premiers exemples et inclure le reste dans l'ensemble d'apprentissage. train_dataset = shuffled_dataset.skip(1000) # Prendre les 1 000 premiers exemples pour l'ensemble de validation. validation_dataset = shuffled_dataset.take(1000) law_dataset_streamed = load_dataset( "json", data_files="https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst", split="train", streaming=True, ) next(iter(law_dataset_streamed)) from itertools import islice from datasets import interleave_datasets combined_dataset = interleave_datasets([pubmed_dataset_streamed, law_dataset_streamed]) list(islice(combined_dataset, 2)) base_url = "https://the-eye.eu/public/AI/pile/" data_files = { "train": [base_url + "train/" + f"{idx:02d}.jsonl.zst" for idx in range(30)], "validation": base_url + "val.jsonl.zst", "test": base_url + "test.jsonl.zst", } pile_dataset = load_dataset("json", data_files=data_files, streaming=True) next(iter(pile_dataset["train"]))<jupyter_output><empty_output>
notebooks/course/fr/chapter5/section4.ipynb/0
{ "file_path": "notebooks/course/fr/chapter5/section4.ipynb", "repo_id": "notebooks", "token_count": 1168 }
154
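Because the streaming notebook above pulls multi-gigabyte shards from remote URLs, a smaller sketch can be handy for local experiments. This is only an assumed setup: it uses a hypothetical `data.jsonl` file with one JSON record per line, not the Pile files referenced above.

from itertools import islice
from datasets import load_dataset

# streaming=True returns an IterableDataset: records are parsed lazily,
# so nothing is downloaded or cached to disk up front.
streamed = load_dataset("json", data_files="data.jsonl", split="train", streaming=True)

shuffled = streamed.shuffle(buffer_size=1_000, seed=42)  # buffer-based shuffle
validation = shuffled.take(100)   # first 100 shuffled examples
train = shuffled.skip(100)        # everything after them

print(list(islice(validation, 2)))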
<jupyter_start><jupyter_text>Classification de token (TensorFlow) Installez les bibliothèques 🤗 *Datasets*, 🤗 *Transformers* et 🤗 *Accelerate* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from datasets import load_dataset raw_datasets = load_dataset("wikiann","fr") raw_datasets raw_datasets["train"][0]["tokens"] raw_datasets["train"][0]["ner_tags"] ner_feature = raw_datasets["train"].features["ner_tags"] ner_feature label_names = ner_feature.feature.names label_names words = raw_datasets["train"][0]["tokens"] labels = raw_datasets["train"][0]["ner_tags"] line1 = "" line2 = "" for word, label in zip(words, labels): full_label = label_names[label] max_length = max(len(word), len(full_label)) line1 += word + " " * (max_length - len(word) + 1) line2 += full_label + " " * (max_length - len(full_label) + 1) print(line1) print(line2) from transformers import AutoTokenizer model_checkpoint = "camembert-base" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) tokenizer.is_fast inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True) inputs.tokens() inputs.word_ids() def align_labels_with_tokens(labels, word_ids): new_labels = [] current_word = None for word_id in word_ids: if word_id != current_word: # Début d'un nouveau mot ! 
current_word = word_id label = -100 if word_id is None else labels[word_id] new_labels.append(label) elif word_id is None: # Token special new_labels.append(-100) else: # Même mot que le token précédent label = labels[word_id] # Si l'étiquette est B-XXX, nous la changeons en I-XXX if label % 2 == 1: label += 1 new_labels.append(label) return new_labels labels = raw_datasets["train"][0]["ner_tags"] word_ids = inputs.word_ids() print(labels) print(align_labels_with_tokens(labels, word_ids)) def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples["tokens"], truncation=True, is_split_into_words=True ) all_labels = examples["ner_tags"] new_labels = [] for i, labels in enumerate(all_labels): word_ids = tokenized_inputs.word_ids(i) new_labels.append(align_labels_with_tokens(labels, word_ids)) tokenized_inputs["labels"] = new_labels return tokenized_inputs tokenized_datasets = raw_datasets.map( tokenize_and_align_labels, batched=True, remove_columns=raw_datasets["train"].column_names, ) from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification( tokenizer=tokenizer, return_tensors="tf" ) batch = data_collator([tokenized_datasets["train"][i] for i in range(2)]) batch["labels"] for i in range(2): print(tokenized_datasets["train"][i]["labels"]) tf_train_dataset = tokenized_datasets["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], collate_fn=data_collator, shuffle=True, batch_size=16, ) tf_eval_dataset = tokenized_datasets["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], collate_fn=data_collator, shuffle=False, batch_size=16, ) id2label = {str(i): label for i, label in enumerate(label_names)} label2id = {v: k for k, v in id2label.items()} from transformers import TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained( model_checkpoint, id2label=id2label, label2id=label2id, ) model.config.num_labels from huggingface_hub import notebook_login notebook_login() from transformers import create_optimizer import tensorflow as tf # Train in mixed-precision float16 # Commentez cette ligne si vous utilisez un GPU qui ne bénéficiera pas de cette fonction. tf.keras.mixed_precision.set_global_policy("mixed_float16") # Le nombre d'étapes d'entraînement est le nombre d'échantillons dans le jeu de données, divisé par la taille du batch puis multiplié # par le nombre total d'époques. Notez que le jeu de données tf_train_dataset est ici un lot de données tf.data.Dataset, # pas le jeu de données original Hugging Face, donc son len() est déjà num_samples // batch_size. 
num_epochs = 3 num_train_steps = len(tf_train_dataset) * num_epochs optimizer, schedule = create_optimizer( init_lr=2e-5, num_warmup_steps=0, num_train_steps=num_train_steps, weight_decay_rate=0.01, ) model.compile(optimizer=optimizer) from transformers.keras_callbacks import PushToHubCallback callback = PushToHubCallback(output_dir="camembert-finetuned-ner", tokenizer=tokenizer) model.fit( tf_train_dataset, validation_data=tf_eval_dataset, callbacks=[callback], epochs=num_epochs, ) !pip install seqeval from datasets import load_metric metric = load_metric("seqeval") labels = raw_datasets["train"][0]["ner_tags"] labels = [label_names[i] for i in labels] labels predictions = labels.copy() predictions[2] = "O" metric.compute(predictions=[predictions], references=[labels]) import numpy as np all_predictions = [] all_labels = [] for batch in tf_eval_dataset: logits = model.predict_on_batch(batch)["logits"] labels = batch["labels"] predictions = np.argmax(logits, axis=-1) for prediction, label in zip(predictions, labels): for predicted_idx, label_idx in zip(prediction, label): if label_idx == -100: continue all_predictions.append(label_names[predicted_idx]) all_labels.append(label_names[label_idx]) metric.compute(predictions=[all_predictions], references=[all_labels]) from transformers import pipeline # Remplacez par votre propre checkpoint model_checkpoint = "huggingface-course/camembert-finetuned-ner" token_classifier = pipeline( "token-classification", model=model_checkpoint, aggregation_strategy="simple" ) token_classifier("Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.")<jupyter_output><empty_output>
notebooks/course/fr/chapter7/section2_tf.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section2_tf.ipynb", "repo_id": "notebooks", "token_count": 2616 }
155
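The core trick in the token-classification notebook above is re-aligning word-level NER tags with subword tokens through `word_ids()`. The following is a minimal sketch of that idea with the same `camembert-base` tokenizer; the words and tag ids are made up for illustration, and it skips the notebook's B- to I- label conversion.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("camembert-base")

words = ["Sylvain", "travaille", "à", "Bruxelles"]  # illustrative, not from WikiANN
word_labels = [1, 0, 0, 5]                          # hypothetical tag ids for those words

encoding = tokenizer(words, is_split_into_words=True)
aligned = [
    -100 if word_id is None else word_labels[word_id]  # -100 is ignored by the loss
    for word_id in encoding.word_ids()
]
print(encoding.tokens())
print(aligned)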
<jupyter_start><jupyter_text>You will need an authentication token with your Hugging Face credentials to use the `push_to_hub` method. Execute `huggingface-cli login` in your terminal or by uncommenting the following cell:<jupyter_code># !huggingface-cli login import numpy as np from datasets import load_dataset, load_metric from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainingArguments, ) checkpoint = "bert-base-cased" raw_datasets = load_dataset("glue", "mrpc") tokenizer = AutoTokenizer.from_pretrained(checkpoint) def tokenize_function(examples): return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) training_args = TrainingArguments( "finetuned-bert-mrpc", per_device_train_batch_size=16, per_device_eval_batch_size=16, learning_rate=2e-5, weight_decay=0.01, evaluation_strategy="epoch", logging_strategy="epoch", log_level="error", push_to_hub=True, push_to_hub_model_id="finetuned-bert-mrpc", # push_to_hub_organization="huggingface", # push_to_hub_token="my_token", ) data_collator = DataCollatorWithPadding(tokenizer) metric = load_metric("glue", "mrpc") def compute_metrics(eval_preds): logits, labels = eval_preds predictions = np.argmax(logits, axis=-1) return metric.compute(predictions=predictions, references=labels) trainer = Trainer( model, training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics, ) trainer.train()<jupyter_output><empty_output><jupyter_text>Push to hub from the Trainer directly You will need an authentication token with your Hugging Face credentials to use the `push_to_hub` method. Execute `huggingface-cli login` in your terminal or by uncommenting the following cell:<jupyter_code># !huggingface-cli login<jupyter_output><empty_output><jupyter_text>The `Trainer` has a new method to directly upload the model, tokenizer and model configuration in a repo on the [Hub](https://huggingface.co/). It will even auto-generate a model card draft using the hyperparameters and evaluation results!<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output><jupyter_text>If you are using your own training loop, you can push the model and tokenizer separately (and you will have to write the model card yourself):<jupyter_code># model.push_to_hub("finetuned-bert-mrpc") # tokenizer.push_to_hub("finetuned-bert-mrpc")<jupyter_output><empty_output><jupyter_text>You can load your model from anywhere using from_pretrained!<jupyter_code>from transformers import AutoModelForSequenceClassification model_name = "sgugger/finetuned-bert-mrpc" model = AutoModelForSequenceClassification.from_pretrained(model_name)<jupyter_output><empty_output><jupyter_text>You can use your model in a pipeline!<jupyter_code>from transformers import pipeline classifier = pipeline("text-classification", model=model_name) classifier("My name is Sylvain. [SEP] My name is Lysandre")<jupyter_output><empty_output><jupyter_text>Updating a problematic file is super easy!<jupyter_code>model.config.label2id = {"not equivalent": 0, "equivalent": 1} model.config.id2label = {0: "not equivalent", 1: "equivalent"} model.config.push_to_hub("finetuned-bert-mrpc") classifier = pipeline("text-classification", model=model_name) classifier("My name is Sylvain. 
[SEP] My name is Lysandre")<jupyter_output><empty_output>
notebooks/course/videos/push_to_hub_new.ipynb/0
{ "file_path": "notebooks/course/videos/push_to_hub_new.ipynb", "repo_id": "notebooks", "token_count": 1284 }
156
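As a hedged sketch of the manual route the notebook above only hints at (pushing from your own training loop rather than through the `Trainer`), assuming a hypothetical `my-user/finetuned-bert-mrpc` repository and a placeholder token:

from huggingface_hub import login
from transformers import AutoModelForSequenceClassification, AutoTokenizer

login(token="hf_xxx")  # placeholder token; never hard-code a real one

checkpoint = "bert-base-cased"
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# ... run your own training loop here ...

repo_id = "my-user/finetuned-bert-mrpc"  # hypothetical repository name
model.push_to_hub(repo_id)      # creates the repo if needed and uploads weights/config
tokenizer.push_to_hub(repo_id)  # uploads tokenizer files so pipeline() can reload everything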
<jupyter_start><jupyter_text>Image2Image Pipeline for Stable Diffusion using 🧨 Diffusers This notebook shows how to create a custom `diffusers` pipeline for text-guided image-to-image generation with Stable Diffusion model using 🤗 Hugging Face [🧨 Diffusers library](https://github.com/huggingface/diffusers). For a general introduction to the Stable Diffusion model please refer to this [colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb).<jupyter_code>!nvidia-smi !pip install diffusers==0.11.1 transformers ftfy accelerate<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Requirement already satisfied: diffusers==0.11.1 in /usr/local/lib/python3.7/dist-packages (0.3.0) Requirement already satisfied: transformers in /usr/local/lib/python3.7/dist-packages (4.21.3) Requirement already satisfied: ftfy in /usr/local/lib/python3.7/dist-packages (6.1.1) Requirement already satisfied: torch>=1.4 in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (1.12.1+cu113) Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (1.21.6) Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (7.1.2) Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (4.12.0) Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from diffusers==0.11.1) (3.8.0) Requirement already satisfied:[...]<jupyter_text>To use private and gated models on 🤗 Hugging Face Hub, login is required. If you are only using a public checkpoint (such as `CompVis/stable-diffusion-v1-4` in this notebook), you can skip this step.<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output>Login successful Your token has been saved to /root/.huggingface/token Authenticated through git-credential store but this isn't the helper defined on your machine. You might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your terminal in case you want to set this credential helper as the default git config --global credential.helper store<jupyter_text>Image2Image pipeline.<jupyter_code>import inspect import warnings from typing import List, Optional, Union import torch from tqdm.auto import tqdm from diffusers import StableDiffusionImg2ImgPipeline<jupyter_output><empty_output><jupyter_text>Load the pipeline<jupyter_code>device = "cuda" model_path = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionImg2ImgPipeline.from_pretrained( model_path, torch_dtype=torch.float16, ) pipe = pipe.to(device)<jupyter_output><empty_output><jupyter_text>Download an initial image and preprocess it so we can pass it to the pipeline.<jupyter_code>import requests from io import BytesIO from PIL import Image url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) init_img = Image.open(BytesIO(response.content)).convert("RGB") init_img = init_img.resize((768, 512)) init_img<jupyter_output><empty_output><jupyter_text>Define the prompt and run the pipeline.<jupyter_code>prompt = "A fantasy landscape, trending on artstation"<jupyter_output><empty_output><jupyter_text>Here, `strength` is a value between 0.0 and 1.0, that controls the amount of noise that is added to the input image. 
Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input.<jupyter_code>generator = torch.Generator(device=device).manual_seed(1024) image = pipe(prompt=prompt, image=init_img, strength=0.75, guidance_scale=7.5, generator=generator).images[0] image image = pipe(prompt=prompt, image=init_img, strength=0.5, guidance_scale=7.5, generator=generator).images[0] image<jupyter_output><empty_output><jupyter_text>As you can see, when using a lower value for `strength`, the generated image is closer to the original `image`. Now using [LMSDiscreteScheduler](https://huggingface.co/docs/diffusers/api/schedulers#diffusers.LMSDiscreteScheduler)<jupyter_code>from diffusers import LMSDiscreteScheduler lms = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.scheduler = lms generator = torch.Generator(device=device).manual_seed(1024) image = pipe(prompt=prompt, image=init_img, strength=0.75, guidance_scale=7.5, generator=generator).images[0] image<jupyter_output><empty_output>
notebooks/diffusers/image_2_image_using_diffusers.ipynb/0
{ "file_path": "notebooks/diffusers/image_2_image_using_diffusers.ipynb", "repo_id": "notebooks", "token_count": 1520 }
157
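The image-to-image notebook above compares only two `strength` values. A sketch of sweeping the parameter with a fixed seed could look like the following; the local `sketch.jpg` path is an assumption standing in for the downloaded mountain image.

import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

init_img = Image.open("sketch.jpg").convert("RGB").resize((768, 512))  # hypothetical local file
prompt = "A fantasy landscape, trending on artstation"

# Re-seeding before each call isolates the effect of strength:
# lower values stay closer to init_img, higher values diverge more.
for strength in (0.3, 0.5, 0.75):
    generator = torch.Generator(device="cuda").manual_seed(1024)
    image = pipe(
        prompt=prompt, image=init_img, strength=strength,
        guidance_scale=7.5, generator=generator,
    ).images[0]
    image.save(f"fantasy_strength_{strength}.png")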