Columns: text (string, lengths 7 to 318k), id (string, lengths 14 to 166), metadata (dict), __index_level_0__ (int64, range 0 to 439)
import { collections } from "$lib/server/database";
import { type Actions, fail, redirect } from "@sveltejs/kit";
import { ObjectId } from "mongodb";

import { authCondition } from "$lib/server/auth";
import { base } from "$app/paths";

async function assistantOnlyIfAuthor(locals: App.Locals, assistantId?: string) {
	const assistant = await collections.assistants.findOne({ _id: new ObjectId(assistantId) });

	if (!assistant) {
		throw Error("Assistant not found");
	}

	if (assistant.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString()) {
		throw Error("You are not the author of this assistant");
	}

	return assistant;
}

export const actions: Actions = {
	delete: async ({ params, locals }) => {
		let assistant;
		try {
			assistant = await assistantOnlyIfAuthor(locals, params.assistantId);
		} catch (e) {
			return fail(400, { error: true, message: (e as Error).message });
		}

		await collections.assistants.deleteOne({ _id: assistant._id });

		// and remove it from all users settings
		await collections.settings.updateMany(
			{},
			{
				$pull: { assistants: assistant._id },
			}
		);

		// and delete all avatars
		const fileCursor = collections.bucket.find({ filename: assistant._id.toString() });

		// delete the existing avatar files if they exist
		let fileId = await fileCursor.next();
		while (fileId) {
			await collections.bucket.delete(fileId._id);
			fileId = await fileCursor.next();
		}

		throw redirect(302, `${base}/settings`);
	},
	report: async ({ params, locals }) => {
		// is there already a report from this user for this assistant?
		const report = await collections.reports.findOne({
			assistantId: new ObjectId(params.assistantId),
			createdBy: locals.user?._id ?? locals.sessionId,
		});

		if (report) {
			return fail(400, { error: true, message: "Already reported" });
		}

		const { acknowledged } = await collections.reports.insertOne({
			_id: new ObjectId(),
			assistantId: new ObjectId(params.assistantId),
			createdBy: locals.user?._id ?? locals.sessionId,
			createdAt: new Date(),
			updatedAt: new Date(),
		});

		if (!acknowledged) {
			return fail(500, { error: true, message: "Failed to report assistant" });
		}

		return { from: "report", ok: true, message: "Assistant reported" };
	},

	subscribe: async ({ params, locals }) => {
		const assistant = await collections.assistants.findOne({
			_id: new ObjectId(params.assistantId),
		});

		if (!assistant) {
			return fail(404, { error: true, message: "Assistant not found" });
		}

		// don't push if it's already there
		const settings = await collections.settings.findOne(authCondition(locals));

		if (settings?.assistants?.includes(assistant._id)) {
			return fail(400, { error: true, message: "Already subscribed" });
		}

		const result = await collections.settings.updateOne(authCondition(locals), {
			$addToSet: { assistants: assistant._id },
		});

		// increase count only if the push succeeded
		if (result.modifiedCount > 0) {
			await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: 1 } });
		}

		return { from: "subscribe", ok: true, message: "Assistant added" };
	},

	unsubscribe: async ({ params, locals }) => {
		const assistant = await collections.assistants.findOne({
			_id: new ObjectId(params.assistantId),
		});

		if (!assistant) {
			return fail(404, { error: true, message: "Assistant not found" });
		}

		const result = await collections.settings.updateOne(authCondition(locals), {
			$pull: { assistants: assistant._id },
		});

		// reduce count only if the pull succeeded
		if (result.modifiedCount > 0) {
			await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: -1 } });
		}

		throw redirect(302, `${base}/settings`);
	},
};
chat-ui/src/routes/settings/assistants/[assistantId]/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/settings/assistants/[assistantId]/+page.server.ts", "repo_id": "chat-ui", "token_count": 1257 }
52
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": { "type": "line" }, "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "quantitative", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "quantitative", "title": "<DVC_METRIC_Y_LABEL>", "scale": { "zero": false } }, "color": { "field": "rev", "type": "nominal" } }, "transform": [ { "loess": "<DVC_METRIC_Y>", "on": "<DVC_METRIC_X>", "groupby": [ "rev" ], "bandwidth": 0.3 } ] }
datasets/.dvc/plots/smooth.json/0
{ "file_path": "datasets/.dvc/plots/smooth.json", "repo_id": "datasets", "token_count": 569 }
53
.PHONY: quality style test

check_dirs := tests src benchmarks metrics utils

# Check that source code meets quality standards
quality:
	ruff check $(check_dirs) setup.py  # linter
	ruff format --check $(check_dirs) setup.py  # formatter

# Format source code automatically
style:
	ruff check --fix $(check_dirs) setup.py  # linter
	ruff format $(check_dirs) setup.py  # formatter

# Run tests for the library
test:
	python -m pytest -n auto --dist=loadfile -s -v ./tests/
datasets/Makefile/0
{ "file_path": "datasets/Makefile", "repo_id": "datasets", "token_count": 149 }
54
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
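For orientation, a minimal sketch of how these benchmark helpers might be driven; the feature dict, output path, and the `utils` import path below are illustrative assumptions, not part of the file above:

```python
import datasets

# hypothetical import path: assumes this file is importable as `utils`
from utils import generate_example_dataset, get_duration

features = datasets.Features(
    {"text": datasets.Value("string"), "label": datasets.Value("int64")}
)

@get_duration
def build(path):
    # writes 100 dummy examples to Arrow and loads them back as a Dataset
    return generate_example_dataset(path, features, num_examples=100)

seconds = build("/tmp/dummy_dataset.arrow")  # get_duration returns elapsed time
print(f"wrote and loaded 100 examples in {seconds:.3f}s")
```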
datasets/benchmarks/utils.py/0
{ "file_path": "datasets/benchmarks/utils.py", "repo_id": "datasets", "token_count": 927 }
55
# Beam Datasets

Some datasets are too large to be processed on a single machine. Instead, you can process them with [Apache Beam](https://beam.apache.org/), a library for parallel data processing. The processing pipeline is executed on a distributed processing backend such as [Apache Flink](https://flink.apache.org/), [Apache Spark](https://spark.apache.org/), or [Google Cloud Dataflow](https://cloud.google.com/dataflow).

We have already created Beam pipelines for some of the larger datasets like [wikipedia](https://huggingface.co/datasets/wikipedia) and [wiki40b](https://huggingface.co/datasets/wiki40b). You can load these normally with [`load_dataset`]. But if you want to run your own Beam pipeline with Dataflow, here is how:

1. Specify the dataset and configuration you want to process:

   ```
   DATASET_NAME=your_dataset_name  # ex: wikipedia
   CONFIG_NAME=your_config_name    # ex: 20220301.en
   ```

2. Input your Google Cloud Platform information:

   ```
   PROJECT=your_project
   BUCKET=your_bucket
   REGION=your_region
   ```

3. Specify your Python requirements:

   ```
   echo "datasets" > /tmp/beam_requirements.txt
   echo "apache_beam" >> /tmp/beam_requirements.txt
   ```

4. Run the pipeline:

   ```
   datasets-cli run_beam datasets/$DATASET_NAME \
   --name $CONFIG_NAME \
   --save_info \
   --cache_dir gs://$BUCKET/cache/datasets \
   --beam_pipeline_options=\
   "runner=DataflowRunner,project=$PROJECT,job_name=$DATASET_NAME-gen,"\
   "staging_location=gs://$BUCKET/binaries,temp_location=gs://$BUCKET/temp,"\
   "region=$REGION,requirements_file=/tmp/beam_requirements.txt"
   ```

<Tip>

When you run your pipeline, you can adjust the parameters to change the runner (Flink or Spark), output location (S3 bucket or HDFS), and the number of workers.

</Tip>
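To illustrate the "load these normally" path mentioned above, a minimal sketch (the dataset and config names mirror the placeholders in step 1; any already-processed config works the same way):

```python
from datasets import load_dataset

# wikipedia was prepared with a ready-made Beam pipeline, so this call
# downloads the already-processed data instead of running Beam locally
wiki = load_dataset("wikipedia", "20220301.en", split="train")
print(wiki[0]["title"])
```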
datasets/docs/source/beam.mdx/0
{ "file_path": "datasets/docs/source/beam.mdx", "repo_id": "datasets", "token_count": 562 }
56
# Datasets

<img class="float-left !m-0 !border-0 !dark:border-0 !shadow-none !max-w-lg w-[150px]" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasets_logo.png"/>

🤗 Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks.

Load a dataset in a single line of code, and use our powerful data processing methods to quickly get your dataset ready for training in a deep learning model. Backed by the Apache Arrow format, process large datasets with zero-copy reads without any memory constraints for optimal speed and efficiency. We also feature a deep integration with the [Hugging Face Hub](https://huggingface.co/datasets), allowing you to easily load and share a dataset with the wider machine learning community.

Find your dataset today on the [Hugging Face Hub](https://huggingface.co/datasets), and take an in-depth look inside of it with the live viewer.

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorial"
      ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
      <p class="text-gray-700">Learn the basics and become familiar with loading, accessing, and processing a dataset. Start here if you are using 🤗 Datasets for the first time!</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./how_to"
      ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
      <p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Datasets to solve real-world problems.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./about_arrow"
      ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
      <p class="text-gray-700">High-level explanations for building a better understanding about important topics such as the underlying data format, the cache, and how datasets are generated.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/main_classes"
      ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
      <p class="text-gray-700">Technical descriptions of how 🤗 Datasets classes and methods work.</p>
    </a>
  </div>
</div>
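As a concrete instance of the "single line of code" claim above, a minimal sketch (the `squad` dataset is just an illustrative pick from the Hub):

```python
from datasets import load_dataset

# downloads, caches, and prepares the dataset in one call
dataset = load_dataset("squad", split="train")
print(dataset[0])
```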
datasets/docs/source/index.mdx/0
{ "file_path": "datasets/docs/source/index.mdx", "repo_id": "datasets", "token_count": 1014 }
57
# Structure your repository

To host and share your dataset, create a dataset repository on the Hugging Face Hub and upload your data files.

This guide will show you how to structure your dataset repository when you upload it. A dataset with a supported structure and file format (`.txt`, `.csv`, `.parquet`, `.jsonl`, `.mp3`, `.jpg`, `.zip`, etc.) is loaded automatically with [`~datasets.load_dataset`], and it'll have a dataset viewer on its dataset page on the Hub.

## Main use-case

The simplest dataset structure has two files: `train.csv` and `test.csv` (this works with any supported file format).

Your repository will also contain a `README.md` file, the [dataset card](dataset_card) displayed on your dataset page.

```
my_dataset_repository/
├── README.md
├── train.csv
└── test.csv
```

In this simple case, you'll get a dataset with two splits: `train` (containing examples from `train.csv`) and `test` (containing examples from `test.csv`).

## Define your splits and subsets in YAML

## Splits

If you have multiple files and want to define which file goes into which split, you can use the YAML `configs` field at the top of your README.md.

For example, given a repository like this one:

```
my_dataset_repository/
├── README.md
├── data.csv
└── holdout.csv
```

You can define your splits by adding the `configs` field in the YAML block at the top of your README.md:

```yaml
---
configs:
- config_name: default
  data_files:
  - split: train
    path: "data.csv"
  - split: test
    path: "holdout.csv"
---
```

You can select multiple files per split using a list of paths:

```
my_dataset_repository/
├── README.md
├── data/
│   ├── abc.csv
│   └── def.csv
└── holdout/
    └── ghi.csv
```

```yaml
---
configs:
- config_name: default
  data_files:
  - split: train
    path:
    - "data/abc.csv"
    - "data/def.csv"
  - split: test
    path: "holdout/ghi.csv"
---
```

Or you can use glob patterns to automatically list all the files you need:

```yaml
---
configs:
- config_name: default
  data_files:
  - split: train
    path: "data/*.csv"
  - split: test
    path: "holdout/*.csv"
---
```

<Tip warning={true}>

Note that the `config_name` field is required even if you have a single configuration.

</Tip>

## Configurations

Your dataset might have several subsets of data that you want to be able to load separately. In that case you can define a list of configurations inside the `configs` field in YAML:

```
my_dataset_repository/
├── README.md
├── main_data.csv
└── additional_data.csv
```

```yaml
---
configs:
- config_name: main_data
  data_files: "main_data.csv"
- config_name: additional_data
  data_files: "additional_data.csv"
---
```

Each configuration is shown separately on the Hugging Face Hub, and can be loaded by passing its name as a second parameter:

```python
from datasets import load_dataset

main_data = load_dataset("my_dataset_repository", "main_data")
additional_data = load_dataset("my_dataset_repository", "additional_data")
```

## Builder parameters

Not only `data_files`, but other builder-specific parameters can be passed via YAML, allowing for more flexibility on how to load the data while not requiring any custom code. For example, define which separator to use in which configuration to load your `csv` files:

```yaml
---
configs:
- config_name: tab
  data_files: "main_data.csv"
  sep: "\t"
- config_name: comma
  data_files: "additional_data.csv"
  sep: ","
---
```

Refer to [specific builders' documentation](./package_reference/builder_classes) to see what configuration parameters they have.

<Tip>

You can set a default configuration using `default: true`, e.g. you can run `main_data = load_dataset("my_dataset_repository")` if you set

```yaml
- config_name: main_data
  data_files: "main_data.csv"
  default: true
```

</Tip>

## Automatic splits detection

If no YAML is provided, 🤗 Datasets searches for certain patterns in the dataset repository to automatically infer the dataset splits. There is an order to the patterns, beginning with the custom filename split format to treating all files as a single split if no pattern is found.

### Directory name

Your data files may also be placed into different directories named `train`, `test`, and `validation` where each directory contains the data files for that split:

```
my_dataset_repository/
├── README.md
└── data/
    ├── train/
    │   └── bees.csv
    ├── test/
    │   └── more_bees.csv
    └── validation/
        └── even_more_bees.csv
```

### Filename splits

If you don't have any non-traditional splits, then you can place the split name anywhere in the data file, and it is automatically inferred. The only rule is that the split name must be delimited by non-word characters, like `test-file.csv` for example instead of `testfile.csv`. Supported delimiters include underscores, dashes, spaces, dots, and numbers.

For example, the following file names are all acceptable:

- train split: `train.csv`, `my_train_file.csv`, `train1.csv`
- validation split: `validation.csv`, `my_validation_file.csv`, `validation1.csv`
- test split: `test.csv`, `my_test_file.csv`, `test1.csv`

Here is an example where all the files are placed into a directory named `data`:

```
my_dataset_repository/
├── README.md
└── data/
    ├── train.csv
    ├── test.csv
    └── validation.csv
```

### Custom filename split

If your dataset splits have custom names that aren't `train`, `test`, or `validation`, then you can name your data files like `data/<split_name>-xxxxx-of-xxxxx.csv`.

Here is an example with three splits, `train`, `test`, and `random`:

```
my_dataset_repository/
├── README.md
└── data/
    ├── train-00000-of-00003.csv
    ├── train-00001-of-00003.csv
    ├── train-00002-of-00003.csv
    ├── test-00000-of-00001.csv
    ├── random-00000-of-00003.csv
    ├── random-00001-of-00003.csv
    └── random-00002-of-00003.csv
```

### Single split

When 🤗 Datasets can't find any of the above patterns, then it'll treat all the files as a single train split. If your dataset splits aren't loading as expected, it may be due to an incorrect pattern.

### Split name keywords

There are several ways to name splits. Validation splits are sometimes called "dev", and test splits may be referred to as "eval". These other split names are also supported, and the following keywords are equivalent:

- train, training
- validation, valid, val, dev
- test, testing, eval, evaluation

The structure below is a valid repository:

```
my_dataset_repository/
├── README.md
└── data/
    ├── training.csv
    ├── eval.csv
    └── valid.csv
```

### Multiple files per split

If one of your splits comprises several files, 🤗 Datasets can still infer whether it is the train, validation, or test split from the file names.

For example, if your train and test splits span several files:

```
my_dataset_repository/
├── README.md
├── train_0.csv
├── train_1.csv
├── train_2.csv
├── train_3.csv
├── test_0.csv
└── test_1.csv
```

Make sure all the files of your `train` set have *train* in their names (same for test and validation). Even if you add a prefix or suffix to `train` in the file name (like `my_train_file_00001.csv` for example), 🤗 Datasets can still infer the appropriate split.

For convenience, you can also place your data files into different directories. In this case, the split name is inferred from the directory name.

```
my_dataset_repository/
├── README.md
└── data/
    ├── train/
    │   ├── shard_0.csv
    │   ├── shard_1.csv
    │   ├── shard_2.csv
    │   └── shard_3.csv
    └── test/
        ├── shard_0.csv
        └── shard_1.csv
```

For more flexibility over how to load and generate a dataset, you can also write a [dataset loading script](./dataset_script).
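A short sketch of loading one of the repositories structured above (using the guide's placeholder repository name; the inferred splits come back as a `DatasetDict`):

```python
from datasets import load_dataset

dataset = load_dataset("my_dataset_repository")
print(dataset)              # DatasetDict with e.g. "train" and "test" splits
print(dataset["train"][0])  # first row of the train split
```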
datasets/docs/source/repository_structure.mdx/0
{ "file_path": "datasets/docs/source/repository_structure.mdx", "repo_id": "datasets", "token_count": 2588 }
58
# Metric Card for BERT Score

## Metric description

BERTScore is an automatic evaluation metric for text generation that computes a similarity score for each token in the candidate sentence with each token in the reference sentence. It leverages the pre-trained contextual embeddings from [BERT](https://huggingface.co/bert-base-uncased) models and matches words in candidate and reference sentences by cosine similarity.

Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language generation tasks.

## How to use

BERTScore takes 3 mandatory arguments: `predictions` (a list of strings of candidate sentences), `references` (a list of strings or list of lists of strings of reference sentences) and either `lang` (a string of two letters indicating the language of the sentences, in [ISO 639-1 format](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)) or `model_type` (a string specifying which model to use, according to the BERT specification). The default behavior of the metric is to use the suggested model for the target language when one is specified, otherwise to use the `model_type` indicated.

```python
from datasets import load_metric
bertscore = load_metric("bertscore")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]
results = bertscore.compute(predictions=predictions, references=references, lang="en")
```

BERTScore also accepts multiple optional arguments:

`num_layers` (int): The layer of representation to use. The default is the number of layers tuned on WMT16 correlation data, which depends on the `model_type` used.

`verbose` (bool): Turn on intermediate status update. The default value is `False`.

`idf` (bool or dict): Use idf weighting; can also be a precomputed idf_dict.

`device` (str): The device on which the contextual embedding model will be allocated. If this argument is `None`, the model lives on `cuda:0` if cuda is available.

`nthreads` (int): Number of threads used for computation. The default value is `4`.

`rescale_with_baseline` (bool): Rescale BERTScore with the pre-computed baseline. The default value is `False`.

`batch_size` (int): BERTScore processing batch size. Note that at least one of `model_type` or `lang` must be specified, and `lang` needs to be specified when `rescale_with_baseline` is `True`.

`baseline_path` (str): Customized baseline file.

`use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer. The default value is `False`.

## Output values

BERTScore outputs a dictionary with the following values:

`precision`: The [precision](https://huggingface.co/metrics/precision) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0.

`recall`: The [recall](https://huggingface.co/metrics/recall) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0.

`f1`: The [F1 score](https://huggingface.co/metrics/f1) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0.

`hashcode`: The hashcode of the library.

### Values from popular papers

The [original BERTScore paper](https://openreview.net/pdf?id=SkeHuCVFDr) reported average model selection accuracies (Hits@1) on WMT18 hybrid systems for different language pairs, which ranged from 0.004 for `en<->tr` to 0.824 for `en<->de`.

For more recent model performance, see the [metric leaderboard](https://paperswithcode.com/paper/bertscore-evaluating-text-generation-with).

## Examples

Maximal values with the `distilbert-base-uncased` model:

```python
from datasets import load_metric
bertscore = load_metric("bertscore")
predictions = ["hello world", "general kenobi"]
references = ["hello world", "general kenobi"]
results = bertscore.compute(predictions=predictions, references=references, model_type="distilbert-base-uncased")
print(results)
{'precision': [1.0, 1.0], 'recall': [1.0, 1.0], 'f1': [1.0, 1.0], 'hashcode': 'distilbert-base-uncased_L5_no-idf_version=0.3.10(hug_trans=4.10.3)'}
```

Partial match with the `bert-base-uncased` model:

```python
from datasets import load_metric
bertscore = load_metric("bertscore")
predictions = ["hello world", "general kenobi"]
references = ["goodnight moon", "the sun is shining"]
results = bertscore.compute(predictions=predictions, references=references, model_type="bert-base-uncased")
print(results)
{'precision': [0.7380737066268921, 0.5584042072296143], 'recall': [0.7380737066268921, 0.5889028906822205], 'f1': [0.7380737066268921, 0.5732481479644775], 'hashcode': 'bert-base-uncased_L5_no-idf_version=0.3.10(hug_trans=4.10.3)'}
```

## Limitations and bias

The [original BERTScore paper](https://openreview.net/pdf?id=SkeHuCVFDr) showed that BERTScore correlates well with human judgment on sentence-level and system-level evaluation, but this depends on the model and language pair selected.

Furthermore, not all languages are supported by the metric -- see the [BERTScore supported language list](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages) for more information.

Finally, calculating the BERTScore metric involves downloading the BERT model that is used to compute the score -- the default model for `en`, `roberta-large`, takes over 1.4GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance `distilbert-base-uncased` is 268MB. A full list of compatible models can be found [here](https://docs.google.com/spreadsheets/d/1RKOVpselB98Nnh_EOC4A2BYn8_201tmPODpNWu4w7xI/edit#gid=0).

## Citation

```bibtex
@inproceedings{bert-score,
  title={BERTScore: Evaluating Text Generation with BERT},
  author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi},
  booktitle={International Conference on Learning Representations},
  year={2020},
  url={https://openreview.net/forum?id=SkeHuCVFDr}
}
```

## Further References

- [BERTScore Project README](https://github.com/Tiiiger/bert_score#readme)
- [BERTScore ICLR 2020 Poster Presentation](https://iclr.cc/virtual_2020/poster_SkeHuCVFDr.html)
datasets/metrics/bertscore/README.md/0
{ "file_path": "datasets/metrics/bertscore/README.md", "repo_id": "datasets", "token_count": 1908 }
59
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accuracy metric for the Mathematics Aptitude Test of Heuristics (MATH) dataset."""

import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTeX.
    references: list of references for each prediction. Each
        reference is a string that contains natural language
        and LaTeX.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            # Homepage of the metric for documentation
            homepage="https://github.com/hendrycks/math",
            # Additional links to the codebase or references
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the scores"""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
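A quick usage sketch; note that the metric needs the `math_equivalence` module, which (per the import comment above) comes from the Hendrycks MATH repository:

```python
# pip install git+https://github.com/hendrycks/math.git
import datasets

metric = datasets.load_metric("competition_math")
# "1/2" is canonicalized to "\\frac{1}{2}" before comparison
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0}
```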
datasets/metrics/competition_math/competition_math.py/0
{ "file_path": "datasets/metrics/competition_math/competition_math.py", "repo_id": "datasets", "token_count": 1181 }
60
# Metric Card for IndicGLUE

## Metric description

This metric is used to compute the evaluation metric for the [IndicGLUE dataset](https://huggingface.co/datasets/indic_glue).

IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - Assamese (`as`), Bengali (`bn`), Gujarati (`gu`), Hindi (`hi`), Kannada (`kn`), Malayalam (`ml`), Marathi (`mr`), Oriya (`or`), Panjabi (`pa`), Tamil (`ta`) and Telugu (`te`).

## How to use

There are two steps: (1) loading the IndicGLUE metric relevant to the subset of the dataset being used for evaluation; and (2) calculating the metric.

1. **Loading the relevant IndicGLUE metric**: the subsets of IndicGLUE are the following: `wnli`, `copa`, `sna`, `csqa`, `wstp`, `inltkh`, `bbca`, `cvit-mkb-clsr`, `iitp-mr`, `iitp-pr`, `actsa-sc`, `md`, and `wiki-ner`.

More information about the different subsets of the IndicGLUE dataset can be found on the [IndicGLUE dataset page](https://indicnlp.ai4bharat.org/indic-glue/).

2. **Calculating the metric**: the metric takes two inputs: one list with the predictions of the model to score and one list of references, for all subsets of the dataset except for `cvit-mkb-clsr`, where each prediction and reference is a vector of floats.

```python
from datasets import load_metric
indic_glue_metric = load_metric('indic_glue', 'wnli')
references = [0, 1]
predictions = [0, 1]
results = indic_glue_metric.compute(predictions=predictions, references=references)
```

## Output values

The output of the metric depends on the IndicGLUE subset chosen, consisting of a dictionary that contains one or several of the following metrics:

`accuracy`: the proportion of correct predictions among the total number of cases processed, with a range between 0 and 1 (see [accuracy](https://huggingface.co/metrics/accuracy) for more information).

`f1`: the harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall.

`precision@10`: the fraction of the true examples among the top 10 predicted examples, with a range between 0 and 1 (see [precision](https://huggingface.co/metrics/precision) for more information).

The `cvit-mkb-clsr` subset returns `precision@10`, the `wiki-ner` subset returns `accuracy` and `f1`, and all other subsets of IndicGLUE return only accuracy.

### Values from popular papers

The [original IndicGLUE paper](https://aclanthology.org/2020.findings-emnlp.445.pdf) reported an average accuracy of 0.766 on the dataset, which varies depending on the subset selected.

## Examples

Maximal values for the WNLI subset (which outputs `accuracy`):

```python
from datasets import load_metric
indic_glue_metric = load_metric('indic_glue', 'wnli')
references = [0, 1]
predictions = [0, 1]
results = indic_glue_metric.compute(predictions=predictions, references=references)
print(results)
{'accuracy': 1.0}
```

Minimal values for the Wiki-NER subset (which outputs `accuracy` and `f1`):

```python
>>> from datasets import load_metric
>>> indic_glue_metric = load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [1, 0]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 0.0, 'f1': 0.0}
```

Partial match for the CVIT-Mann Ki Baat subset (which outputs `precision@10`):

```python
>>> from datasets import load_metric
>>> indic_glue_metric = load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
```

## Limitations and bias

This metric works only with datasets that have the same format as the [IndicGLUE dataset](https://huggingface.co/datasets/indic_glue).

## Citation

```bibtex
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
```

## Further References

- [IndicNLP website](https://indicnlp.ai4bharat.org/home/)
datasets/metrics/indic_glue/README.md/0
{ "file_path": "datasets/metrics/indic_glue/README.md", "repo_id": "datasets", "token_count": 1527 }
61
# Metric Card for Pearson Correlation Coefficient (pearsonr)

## Metric Description

Pearson correlation coefficient and p-value for testing non-correlation.

The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.

## How to Use

This metric takes a list of predictions and a list of references as input:

```python
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
```

### Inputs

- **predictions** (`list` of `int`): Predicted class labels, as returned by a model.
- **references** (`list` of `int`): Ground truth labels.
- **return_pvalue** (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

### Output Values

- **pearsonr** (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
- **p-value** (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Output Example(s):

```python
{'pearsonr': -0.7}
```

```python
{'p-value': 0.15}
```

#### Values from Popular Papers

### Examples

Example 1: A simple example using only predictions and references.

```python
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
```

Example 2: The same as Example 1, but also returning the `p-value`.

```python
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
```

## Limitations and Bias

As stated above, the calculation of the p-value relies on the assumption that each dataset is normally distributed. This is not always the case, so verifying the true distribution of datasets is recommended.

## Citation(s)

```bibtex
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
```

## Further References
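Since the p-value assumes normally distributed inputs (see Limitations above), here is a quick sketch of how one might check that assumption with SciPy before trusting the p-value; the sample data reuses the example inputs from this card:

```python
from scipy import stats

predictions = [10, 9, 2.5, 6, 4]
references = [1, 2, 3, 4, 5]

# Shapiro-Wilk tests the null hypothesis that the data are normal;
# a small p-value (e.g. < 0.05) suggests the normality assumption is violated.
for name, data in [("predictions", predictions), ("references", references)]:
    stat, p = stats.shapiro(data)
    print(f"{name}: W={stat:.3f}, p={p:.3f}")
```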
datasets/metrics/pearsonr/README.md/0
{ "file_path": "datasets/metrics/pearsonr/README.md", "repo_id": "datasets", "token_count": 1387 }
62
# Metric Card for seqeval

## Metric description

seqeval is a Python framework for sequence labeling evaluation. seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.

## How to use

seqeval produces labelling scores along with their sufficient statistics from a source against one or more references.

It takes two mandatory arguments:

`predictions`: a list of lists of predicted labels, i.e. estimated targets as returned by a tagger.

`references`: a list of lists of reference labels, i.e. the ground truth/target values.

It can also take several optional arguments:

`suffix` (boolean): `True` if the IOB tag is a suffix (after type) instead of a prefix (before type), `False` otherwise. The default value is `False`, i.e. the IOB tag is a prefix (before type).

`scheme`: the target tagging scheme, which can be one of [`IOB1`, `IOB2`, `IOE1`, `IOE2`, `IOBES`, `BILOU`]. The default value is `None`.

`mode`: whether to count correct entity labels with incorrect I/B tags as true positives or not. If you want to only count exact matches, pass `mode="strict"` and a specific `scheme` value. The default is `None`.

`sample_weight`: An array-like of shape (n_samples,) that provides weights for individual samples. The default is `None`.

`zero_division`: Which value to substitute as a metric value when encountering zero division. Should be one of [`0`, `1`, `"warn"`]. `"warn"` acts as `0`, but a warning is also raised.

```python
>>> from datasets import load_metric
>>> seqeval = load_metric('seqeval')
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> results = seqeval.compute(predictions=predictions, references=references)
```

## Output values

This metric returns a dictionary with a summary of scores for overall and per type:

Overall:

`accuracy`: the average [accuracy](https://huggingface.co/metrics/accuracy), on a scale between 0.0 and 1.0.

`precision`: the average [precision](https://huggingface.co/metrics/precision), on a scale between 0.0 and 1.0.

`recall`: the average [recall](https://huggingface.co/metrics/recall), on a scale between 0.0 and 1.0.

`f1`: the average [F1 score](https://huggingface.co/metrics/f1), which is the harmonic mean of the precision and recall. It also has a scale of 0.0 to 1.0.

Per type (e.g. `MISC`, `PER`, `LOC`,...):

`precision`: the average [precision](https://huggingface.co/metrics/precision), on a scale between 0.0 and 1.0.

`recall`: the average [recall](https://huggingface.co/metrics/recall), on a scale between 0.0 and 1.0.

`f1`: the average [F1 score](https://huggingface.co/metrics/f1), on a scale between 0.0 and 1.0.

### Values from popular papers

The 1995 "Text Chunking using Transformation-Based Learning" [paper](https://aclanthology.org/W95-0107) reported a baseline recall of 81.9% and a precision of 78.2% using non Deep Learning-based methods.

More recently, seqeval continues being used for reporting performance on tasks such as [named entity detection](https://www.mdpi.com/2306-5729/6/8/84/htm) and [information extraction](https://ieeexplore.ieee.org/abstract/document/9697942/).

## Examples

Maximal values (full match):

```python
>>> from datasets import load_metric
>>> seqeval = load_metric('seqeval')
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> results = seqeval.compute(predictions=predictions, references=references)
>>> print(results)
{'MISC': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'overall_precision': 1.0, 'overall_recall': 1.0, 'overall_f1': 1.0, 'overall_accuracy': 1.0}
```

Minimal values (no match):

```python
>>> from datasets import load_metric
>>> seqeval = load_metric('seqeval')
>>> predictions = [['O', 'B-MISC', 'I-MISC'], ['B-PER', 'I-PER', 'O']]
>>> references = [['B-MISC', 'O', 'O'], ['I-PER', '0', 'I-PER']]
>>> results = seqeval.compute(predictions=predictions, references=references)
>>> print(results)
{'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'PER': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 2}, '_': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'overall_precision': 0.0, 'overall_recall': 0.0, 'overall_f1': 0.0, 'overall_accuracy': 0.0}
```

Partial match:

```python
>>> from datasets import load_metric
>>> seqeval = load_metric('seqeval')
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> results = seqeval.compute(predictions=predictions, references=references)
>>> print(results)
{'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'overall_precision': 0.5, 'overall_recall': 0.5, 'overall_f1': 0.5, 'overall_accuracy': 0.8}
```

## Limitations and bias

seqeval supports the following IOB formats (short for inside, outside, beginning): `IOB1`, `IOB2`, `IOE1`, `IOE2`, `IOBES` (only in strict mode) and `BILOU` (only in strict mode).

For more information about IOB formats, refer to the [Wikipedia page](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)) and the description of the [CoNLL-2000 shared task](https://aclanthology.org/W02-2024).

## Citation

```bibtex
@inproceedings{ramshaw-marcus-1995-text,
    title = "Text Chunking using Transformation-Based Learning",
    author = "Ramshaw, Lance and Marcus, Mitch",
    booktitle = "Third Workshop on Very Large Corpora",
    year = "1995",
    url = "https://www.aclweb.org/anthology/W95-0107",
}
```

```bibtex
@misc{seqeval,
  title={{seqeval}: A Python framework for sequence labeling evaluation},
  url={https://github.com/chakki-works/seqeval},
  note={Software available from https://github.com/chakki-works/seqeval},
  author={Hiroki Nakayama},
  year={2018},
}
```

## Further References

- [README for seqeval at GitHub](https://github.com/chakki-works/seqeval)
- [CoNLL-2000 shared task](https://www.clips.uantwerpen.be/conll2002/ner/bin/conlleval.txt)
datasets/metrics/seqeval/README.md/0
{ "file_path": "datasets/metrics/seqeval/README.md", "repo_id": "datasets", "token_count": 2355 }
63
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Word Error Rate (WER) metric."""

from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system, with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
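To make the WER formula concrete, a small worked sketch that hand-computes the docstring example using jiwer's primitives (the same calls the `_compute` method above makes). Pair 1 has 1 substitution over 4 reference words; pair 2 has 2 substitutions plus 1 insertion over 4 reference words, giving (1 + 3) / 8 = 0.5 overall:

```python
from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect, total = 0, 0
for prediction, reference in zip(predictions, references):
    m = compute_measures(reference, prediction)
    # errors = S + D + I; reference length N = S + D + C (hits)
    incorrect += m["substitutions"] + m["deletions"] + m["insertions"]
    total += m["substitutions"] + m["deletions"] + m["hits"]

print(incorrect / total)  # 0.5
```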
datasets/metrics/wer/wer.py/0
{ "file_path": "datasets/metrics/wer/wer.py", "repo_id": "datasets", "token_count": 1452 }
64
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset.
    The new dataset is constructed by alternating between the sources to get the examples.

    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.

        - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
        - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.

    The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy`
    is `"all_exhausted"`, in which case the resulting dataset ends when all datasets have run out of examples at least once.

    Note for iterable datasets:

    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).

    Args:
        datasets (`List[Dataset]` or `List[IterableDataset]`):
            List of datasets to interleave.
        probabilities (`List[float]`, *optional*, defaults to `None`):
            If specified, the new dataset is constructed by sampling
            examples from one source at a time according to these probabilities.
        seed (`int`, *optional*, defaults to `None`):
            The random seed used to choose a source for each example.
        info ([`DatasetInfo`], *optional*):
            Dataset information, like description, citation, etc.

            <Added version="2.4.0"/>
        split ([`NamedSplit`], *optional*):
            Name of the dataset split.

            <Added version="2.4.0"/>
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:

            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
        parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
        `IterableDataset`.

    Example:

        For regular datasets (map-style):

        ```python
        >>> from datasets import Dataset, interleave_datasets
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
        ```

        For datasets in streaming mode (iterable):

        ```python
        >>> from datasets import load_dataset, interleave_datasets
        >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
        >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
        >>> dataset = interleave_datasets([d1, d2])
        >>> iterator = iter(dataset)
        >>> next(iterator)
        {'text': 'Mtendere Village was inspired by the vision...}
        >>> next(iterator)
        {'text': "Média de débat d'idées, de culture...}
        ```
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """
    Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].

    Args:
        dsets (`List[datasets.Dataset]`):
            List of Datasets to concatenate.
        info (`DatasetInfo`, *optional*):
            Dataset information, like description, citation, etc.
        split (`NamedSplit`, *optional*):
            Name of the dataset split.
        axis (`{0, 1}`, defaults to `0`):
            Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally).

            <Added version="1.6.0"/>

    Example:

    ```py
    >>> ds3 = concatenate_datasets([ds1, ds2])
    ```
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
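A small sketch of the two concatenation axes described in the docstring above (the column names are illustrative):

```python
from datasets import Dataset, concatenate_datasets

ds1 = Dataset.from_dict({"a": [0, 1, 2]})
ds2 = Dataset.from_dict({"a": [3, 4, 5]})

# axis=0 (default): stack rows -> 6 rows, one column
rows = concatenate_datasets([ds1, ds2])
print(rows["a"])  # [0, 1, 2, 3, 4, 5]

# axis=1: add columns side by side (same number of rows required)
ds3 = Dataset.from_dict({"b": ["x", "y", "z"]})
cols = concatenate_datasets([ds1, ds3], axis=1)
print(cols.column_names)  # ['a', 'b']
```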
datasets/src/datasets/combine.py/0
{ "file_path": "datasets/src/datasets/combine.py", "repo_id": "datasets", "token_count": 4607 }
65
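The `concatenate_datasets` docstring in the record above gives only a one-line example, so here is a slightly fuller sketch of both concatenation axes. The column names and values are illustrative placeholders, not taken from the library's own tests.

```python
# Minimal sketch of concatenate_datasets over both axes (values are
# illustrative). axis=0 stacks rows vertically; axis=1 joins columns
# horizontally and requires the datasets to have the same number of
# rows and disjoint column names.
from datasets import Dataset, concatenate_datasets

ds1 = Dataset.from_dict({"a": [0, 1, 2]})
ds2 = Dataset.from_dict({"a": [3, 4, 5]})
ds3 = Dataset.from_dict({"b": ["x", "y", "z"]})

rows = concatenate_datasets([ds1, ds2])          # vertical (default axis=0)
assert rows["a"] == [0, 1, 2, 3, 4, 5]

cols = concatenate_datasets([ds1, ds3], axis=1)  # horizontal
assert cols.column_names == ["a", "b"]
assert cols[0] == {"a": 0, "b": "x"}
```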
import glob import io import os import posixpath import re import tarfile import time import xml.dom.minidom import zipfile from asyncio import TimeoutError from io import BytesIO from itertools import chain from pathlib import Path, PurePosixPath from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union from xml.etree import ElementTree as ET import fsspec from aiohttp.client_exceptions import ClientError from huggingface_hub.utils import EntryNotFoundError from packaging import version from .. import config from ..filesystems import COMPRESSION_FILESYSTEMS from ..utils.file_utils import ( get_authentication_headers_for_url, get_datasets_user_agent, http_head, is_local_path, is_relative_path, url_or_path_join, ) from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .download_config import DownloadConfig logger = get_logger(__name__) BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", ] COMPRESSION_EXTENSION_TO_PROTOCOL = { # single file compression **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}, # archive compression "zip": "zip", } SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS} SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/") MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class NonStreamableDatasetError(Exception): pass def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): return os.path.join(a, *p) else: a = posixpath.join(a, *p) return "::".join([a] + b) def xdirname(a): """ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. 
Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xdirname function allows you to apply the dirname on the first path of the chain. Example:: >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip") zip://folder1::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): a = os.path.dirname(Path(a).as_posix()) else: a = posixpath.dirname(a) # if we end up at the root of the protocol, we get for example a = 'http:' # so we have to fix it by adding the '//' that was removed: if a.endswith(":"): a += "//" return "::".join([a] + b) def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None): """Extend `os.path.exists` function to support both local and remote files. Args: urlpath (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return os.path.exists(main_hop) else: urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) return fs.exists(main_hop) def xbasename(a): """ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xbasename function allows you to apply the basename on the first path of the chain. Example:: >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") file.txt """ a, *b = str(a).split("::") if is_local_path(a): return os.path.basename(Path(a).as_posix()) else: return posixpath.basename(a) def xsplit(a): """ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplit function allows you to apply the xsplit on the first path of the chain. Example:: >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1::https://host.com/archive.zip', 'file.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.split(Path(a).as_posix()) else: a, tail = posixpath.split(a) return "::".join([a + "//" if a.endswith(":") else a] + b), tail def xsplitext(a): """ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. 
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplitext function allows you to apply the splitext on the first path of the chain. Example:: >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1/file::https://host.com/archive.zip', '.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.splitext(Path(a).as_posix()) else: a, ext = posixpath.splitext(a) return "::".join([a] + b), ext def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isfile` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isfile(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) return fs.isfile(main_hop) def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: """Extend `os.path.getsize` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `int`: optional """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.getsize(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) try: size = fs.size(main_hop) except EntryNotFoundError: raise FileNotFoundError(f"No such file: {path}") if size is None: # use xopen instead of fs.open to make data fetching more robust with xopen(path, download_config=download_config) as f: size = len(f.read()) return size def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isdir(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) inner_path = main_hop.split("://")[-1] if not inner_path.strip("/"): return True return fs.isdir(inner_path) def xrelpath(path, start=None): """Extend `os.path.relpath` function to support remote files. Args: path (`str`): URL path. start (`str`): Start URL directory path. 
Returns: `str` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) else: return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) def _add_retries_to_file_obj_read_method(file_obj): read = file_obj.read max_retries = config.STREAMING_READ_MAX_RETRIES def read_with_retries(*args, **kwargs): disconnect_err = None for retry in range(1, max_retries + 1): try: out = read(*args, **kwargs) break except (ClientError, TimeoutError) as err: disconnect_err = err logger.warning( f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" ) time.sleep(config.STREAMING_READ_RETRY_INTERVAL) else: raise ConnectionError("Server Disconnected") from disconnect_err return out file_obj.read = read_with_retries def _get_path_extension(path: str) -> str: # Get extension: https://foo.bar/train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz urlpath = str(urlpath) path = urlpath.split("::")[0] extension = _get_path_extension(path) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) try: with fsspec.open(urlpath, **(storage_options or {})) as f: return _get_extraction_protocol_with_magic_number(f) except FileNotFoundError: if urlpath.startswith(config.HF_ENDPOINT): raise FileNotFoundError( urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." 
) from None else: raise def _prepare_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: prepared_urlpath = [] prepared_storage_options = {} for hop in urlpath.split("::"): hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) prepared_urlpath.append(hop) prepared_storage_options.update(storage_options) return "::".join(prepared_urlpath), storage_options def _prepare_single_hop_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: """ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head In particular it resolves google drive URLs It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. Storage options are formatted in the form {protocol: storage_options_for_protocol} """ token = None if download_config is None else download_config.token if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath: urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) protocol = urlpath.split("://")[0] if "://" in urlpath else "file" if download_config is not None and protocol in download_config.storage_options: storage_options = download_config.storage_options[protocol] elif download_config is not None and protocol not in download_config.storage_options: storage_options = { option_name: option_value for option_name, option_value in download_config.storage_options.items() if option_name not in fsspec.available_protocols() } else: storage_options = {} if storage_options: storage_options = {protocol: storage_options} if protocol in ["http", "https"]: storage_options[protocol] = { "headers": { **get_authentication_headers_for_url(urlpath, token=token), "user-agent": get_datasets_user_agent(), }, "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables. **(storage_options.get(protocol, {})), } if "drive.google.com" in urlpath: response = http_head(urlpath) cookies = None for k, v in response.cookies.items(): if k.startswith("download_warning"): urlpath += "&confirm=" + v cookies = response.cookies storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})} # Fix Google Drive URL to avoid Virus scan warning if "drive.google.com" in urlpath and "confirm=" not in urlpath: urlpath += "&confirm=t" if urlpath.startswith("https://raw.githubusercontent.com/"): # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 storage_options[protocol]["headers"]["Accept-Encoding"] = "identity" elif protocol == "hf": storage_options[protocol] = { "token": token, "endpoint": config.HF_ENDPOINT, **storage_options.get(protocol, {}), } # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967) if config.HF_HUB_VERSION < version.parse("0.21.0"): storage_options[protocol]["block_size"] = "default" return urlpath, storage_options def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `open` function to support remote files using `fsspec`. It also has a retry mechanism in case connection fails. The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co Args: file (`str`): Path name of the file to be opened. 
mode (`str`, *optional*, default "r"): Mode in which the file is opened. *args: Arguments to be passed to `fsspec.open`. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Keyword arguments to be passed to `fsspec.open`. Returns: file object """ # This works as well for `xopen(str(Path(...)))` file_str = _as_str(file) main_hop, *rest_hops = file_str.split("::") if is_local_path(main_hop): # ignore fsspec-specific kwargs kwargs.pop("block_size", None) return open(main_hop, mode, *args, **kwargs) # add headers and cookies for authentication on the HF Hub and for Google Drive file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) kwargs = {**kwargs, **(storage_options or {})} try: file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() except ValueError as e: if str(e) == "Cannot seek streaming HTTP file": raise NonStreamableDatasetError( "Streaming is not possible for this dataset because data host server doesn't support HTTP range " "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" ) from e else: raise except FileNotFoundError: if file.startswith(config.HF_ENDPOINT): raise FileNotFoundError( file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise _add_retries_to_file_obj_read_method(file_obj) return file_obj def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]: """Extend `os.listdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(path).split("::") if is_local_path(main_hop): return os.listdir(path) else: # globbing inside a zip in a private repo requires authentication path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): raise FileNotFoundError(f"Directory doesn't exist: {path}") objects = fs.listdir(inner_path) return [os.path.basename(obj["name"].strip("/")) for obj in objects] def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): """Extend `glob.glob` function to support remote files. Args: urlpath (`str`): URL path with shell-style wildcard patterns. recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more directories or subdirectories. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return glob.glob(main_hop, recursive=recursive) else: # globbing inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. 
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. # - If there is "**" in the pattern, `fs.glob` must be called anyway. inner_path = main_hop.split("://")[1] globbed_paths = fs.glob(inner_path) protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `os.walk` function to support remote files. Args: urlpath (`str`): URL root path. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Additional keyword arguments forwarded to the underlying filesystem. Yields: `tuple`: 3-tuple (dirpath, dirnames, filenames). """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): yield from os.walk(main_hop, **kwargs) else: # walking inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): return [] protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames class xPath(type(Path())): """Extension of `pathlib.Path` to support both local paths and remote URLs.""" def __str__(self): path_str = super().__str__() main_hop, *rest_hops = path_str.split("::") if is_local_path(main_hop): return main_hop path_as_posix = path_str.replace("\\", "/") path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol return path_as_posix def exists(self, download_config: Optional[DownloadConfig] = None): """Extend `pathlib.Path.exists` method to support both local and remote files. Args: download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ return xexists(str(self), download_config=download_config) def glob(self, pattern, download_config: Optional[DownloadConfig] = None): """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. download_config : mainly use token or storage_options to support different platforms and auth types. Yields: [`xPath`] """ posix_path = self.as_posix() main_hop, *rest_hops = posix_path.split("::") if is_local_path(main_hop): yield from Path(main_hop).glob(pattern) else: # globbing inside a zip in a private repo requires authentication if rest_hops: urlpath = rest_hops[0] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) storage_options = {urlpath.split("://")[0]: storage_options} posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) else: storage_options = None fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options) # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`. 
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories. # - If there is "**" in the pattern, `fs.glob` must be called anyway. globbed_paths = fs.glob(xjoin(main_hop, pattern)) for globbed_path in globbed_paths: yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) def rglob(self, pattern, **kwargs): """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. Yields: [`xPath`] """ return self.glob("**/" + pattern, **kwargs) @property def parent(self) -> "xPath": """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: [`xPath`] """ return type(self)(xdirname(self.as_posix())) @property def name(self) -> str: """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).name @property def stem(self) -> str: """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).stem @property def suffix(self) -> str: """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).suffix def open(self, *args, **kwargs): """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. Args: **args: Arguments passed to :func:`fsspec.open`. **kwargs: Keyword arguments passed to :func:`fsspec.open`. Returns: `io.FileIO`: File-like object. """ return xopen(str(self), *args, **kwargs) def joinpath(self, *p: Tuple[str, ...]) -> "xPath": """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. Args: *p (`tuple` of `str`): Other path components. 
Returns: [`xPath`] """ return type(self)(xjoin(self.as_posix(), *p)) def __truediv__(self, p: str) -> "xPath": return self.joinpath(p) def with_suffix(self, suffix): main_hop, *rest_hops = str(self).split("::") if is_local_path(main_hop): return type(self)(str(super().with_suffix(suffix))) return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) def _as_str(path: Union[str, Path, xPath]): return str(path) if isinstance(path, xPath) else str(xPath(str(path))) def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import gzip if hasattr(filepath_or_buffer, "read"): return gzip.open(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import numpy as np if hasattr(filepath_or_buffer, "read"): return np.load(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): return pd.read_csv(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) if kwargs.get("compression", "infer") == "infer": kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): try: return pd.read_excel(filepath_or_buffer, **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) try: return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel( BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs ) def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pyarrow.parquet as pq if hasattr(filepath_or_buffer, "read"): return pq.read_table(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs) def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import scipy.io as sio if hasattr(filepath_or_buffer, "read"): return sio.loadmat(filepath_or_buffer, **kwargs) else: return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): """Extend `xml.etree.ElementTree.parse` function to support remote files. Args: source: File path or file object. parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `xml.etree.ElementTree.Element`: Root element of the given source document. """ if hasattr(source, "read"): return ET.parse(source, parser=parser) else: with xopen(source, "rb", download_config=download_config) as f: return ET.parse(f, parser=parser) def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `xml.dom.minidom.parse` function to support remote files. Args: filename_or_file (`str` or file): File path or file object. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. Returns: :obj:`xml.dom.minidom.Document`: Parsed document. """ if hasattr(filename_or_file, "read"): return xml.dom.minidom.parse(filename_or_file, **kwargs) else: with xopen(filename_or_file, "rb", download_config=download_config) as f: return xml.dom.minidom.parse(f, **kwargs) class _IterableFromGenerator(Iterable): """Utility class to create an iterable from a generator function, in order to reset the generator when needed.""" def __init__(self, generator: Callable, *args, **kwargs): self.generator = generator self.args = args self.kwargs = kwargs def __iter__(self): yield from self.generator(*self.args, **self.kwargs) class ArchiveIterable(_IterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_urlpath( cls, urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath, download_config=download_config) # Set block_size=0 to get faster streaming # (e.g. 
for hf:// and https:// it uses streaming Requests file-like instances) with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) class FilesIterable(_IterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_urlpaths( cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None ) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if xisfile(urlpath, download_config=download_config): yield urlpath elif xisdir(urlpath, download_config=download_config): for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): # in-place modification to prune the search dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if xbasename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield xjoin(dirpath, filename) else: raise FileNotFoundError(urlpath) @classmethod def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": return cls(cls._iter_from_urlpaths, urlpaths, download_config) class StreamingDownloadManager: """ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives. Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract data, but they rather return the path or url that could be opened using the `xopen` function which extends the built-in `open` function to stream data from remote files. """ is_streaming = True def __init__( self, dataset_name: Optional[str] = None, data_dir: Optional[str] = None, download_config: Optional[DownloadConfig] = None, base_path: Optional[str] = None, ): self._dataset_name = dataset_name self._data_dir = data_dir self._base_path = base_path or os.path.abspath(".") self.download_config = download_config or DownloadConfig() @property def manual_dir(self): return self._data_dir def download(self, url_or_urls): """Normalize URL(s) of files to stream data from. This is the lazy version of `DownloadManager.download` for streaming. Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` """ url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) return url_or_urls def _download(self, urlpath: str) -> str: urlpath = str(urlpath) if is_relative_path(urlpath): # append the relative path to the base_path urlpath = url_or_path_join(self._base_path, urlpath) return urlpath def extract(self, url_or_urls): """Add extraction protocol for given url(s) for streaming. This is the lazy version of `DownloadManager.extract` for streaming. 
Args: url_or_urls (`str` or `list` or `dict`): URL(s) of files to stream data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> extracted_files = dl_manager.extract(downloaded_files) ``` """ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) return urlpaths def _extract(self, urlpath: str) -> str: urlpath = str(urlpath) protocol = _get_extraction_protocol(urlpath, download_config=self.download_config) # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz path = urlpath.split("::")[0] extension = _get_path_extension(path) if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")): raise NotImplementedError( f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. " f"Please use `dl_manager.iter_archive` instead.\n\n" f"Example usage:\n\n" f"\turl = dl_manager.download(url)\n" f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n" f"\tfor filename, file in tar_archive_iterator:\n" f"\t\t..." ) if protocol is None: # no extraction return urlpath elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(urlpath.split("::")[0]) inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file return f"{protocol}://{inner_file}::{urlpath}" else: return f"{protocol}://::{urlpath}" def download_and_extract(self, url_or_urls): """Prepare given `url_or_urls` for streaming (add extraction protocol). This is the lazy version of `DownloadManager.download_and_extract` for streaming. Is equivalent to: ``` urls = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls (`str` or `list` or `dict`): URL(s) to stream from data from. Each url is a `str`. Returns: url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`. """ return self.extract(self.download(url_or_urls)) def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]: """Iterate over files within an archive. Args: urlpath_or_buf (`str` or `io.BufferedReader`): Archive path or archive binary file object. Yields: `tuple[str, io.BufferedReader]`: 2-tuple (path_within_archive, file_object). File object is opened in binary mode. Example: ```py >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') >>> files = dl_manager.iter_archive(archive) ``` """ if hasattr(urlpath_or_buf, "read"): return ArchiveIterable.from_buf(urlpath_or_buf) else: return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config) def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]: """Iterate over files. Args: urlpaths (`str` or `list` of `str`): Root paths. Yields: str: File URL path. Example: ```py >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip') >>> files = dl_manager.iter_files(files) ``` """ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
datasets/src/datasets/download/streaming_download_manager.py/0
{ "file_path": "datasets/src/datasets/download/streaming_download_manager.py", "repo_id": "datasets", "token_count": 18792 }
66
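As a quick sanity check of the "::" hop-separator helpers defined in the record above, the following snippet replays the behaviour documented in their docstrings; the archive URL is a placeholder and no network access is performed.

```python
# The "::" chain means: open folder1/file.txt *inside* the zip fetched
# over https. These helpers only manipulate the first hop of the chain.
from datasets.download.streaming_download_manager import (
    xbasename,
    xdirname,
    xjoin,
    xsplitext,
)

url = "zip://folder1/file.txt::https://host.com/archive.zip"

assert xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") == url
assert xdirname(url) == "zip://folder1::https://host.com/archive.zip"
assert xbasename(url) == "file.txt"
assert xsplitext(url) == ("zip://folder1/file::https://host.com/archive.zip", ".txt")
```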
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]): def __init__(self, features=None, **torch_tensor_kwargs): super().__init__(features=features) self.torch_tensor_kwargs = torch_tensor_kwargs import torch # noqa import torch at initialization def _consolidate(self, column): import torch if isinstance(column, list) and column: if all( isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(column) return column def _tensorize(self, value): import torch if isinstance(value, (str, bytes, type(None))): return value elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): return value.tolist() default_dtype = {} if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): default_dtype = {"dtype": torch.int64} elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): default_dtype = {"dtype": torch.float32} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(value, PIL.Image.Image): value = np.asarray(value) return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs}) def _recursive_tensorize(self, data_struct): import torch # support for torch, tf, jax etc. 
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
datasets/src/datasets/formatting/torch_formatter.py/0
{ "file_path": "datasets/src/datasets/formatting/torch_formatter.py", "repo_id": "datasets", "token_count": 1679 }
67
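`TorchFormatter` is normally reached through `Dataset.with_format("torch")` rather than instantiated directly. The sketch below shows the default dtype promotion implemented in `_tensorize`; it assumes `torch` is installed, and the column names are made up.

```python
# Rows and batches come back as torch.Tensor; Python floats are promoted
# to float32 and ints to int64 unless torch_tensor_kwargs override the dtype.
import torch
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")

assert isinstance(ds[0]["x"], torch.Tensor)
assert ds[0]["x"].dtype == torch.float32  # floating point -> float32
assert ds[:]["y"].dtype == torch.int64    # integers -> int64
```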
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Utilities for file names."""

import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
datasets/src/datasets/naming.py/0
{ "file_path": "datasets/src/datasets/naming.py", "repo_id": "datasets", "token_count": 1178 }
68
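A short round trip through the naming helpers above; the dataset name and shard counts are hypothetical, and POSIX-style paths are assumed for the expected output.

```python
# camelcase_to_snakecase handles runs of capitals ("HTTP") via its two
# regexes; filenames_for_dataset_split derives the sharded file names.
from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split, snakecase_to_camelcase

assert camelcase_to_snakecase("HTTPResponse") == "http_response"
assert snakecase_to_camelcase("http_response") == "HttpResponse"

assert filenames_for_dataset_split(
    "/cache", "my_dataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
) == [
    "/cache/my_dataset-train-00000-of-00002.arrow",
    "/cache/my_dataset-train-00001-of-00002.arrow",
]
```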
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging logger = logging.get_logger(__name__) class ParallelBackendConfig: backend_name = None @experimental def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): """ **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either multiprocessing.Pool or joblib for parallelization. Args: function (`Callable[[Any], Any]`): Function to be applied to `iterable`. iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. disable_tqdm (`bool`): Whether to disable the tqdm progressbar. desc (`str`): Prefix for the tqdm progressbar. single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an element of `iterable`, and `rank` is used for progress bar. """ if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func ) return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func) def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): num_proc = num_proc if num_proc <= len(iterable) else len(iterable) split_kwds = [] # We organize the splits ourselve (contiguous splits) for index in range(num_proc): div = len(iterable) // num_proc mod = len(iterable) % num_proc start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc)) if len(iterable) != sum(len(i[1]) for i in split_kwds): raise ValueError( f"Error dividing inputs iterable among processes. " f"Total number of objects {len(iterable)}, " f"length: {sum(len(i[1]) for i in split_kwds)}" ) logger.info( f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}" ) initargs, initializer = None, None if not disable_tqdm: initargs, initializer = (RLock(),), tqdm.set_lock with Pool(num_proc, initargs=initargs, initializer=initializer) as pool: mapped = pool.map(single_map_nested_func, split_kwds) logger.info(f"Finished {num_proc} processes") mapped = [obj for proc_res in mapped for obj in proc_res] logger.info(f"Unpacked {len(mapped)} objects") return mapped def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, # and it requires monkey-patching joblib internal classes which is subject to change import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc): return joblib.Parallel()( joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable ) @experimental @contextlib.contextmanager def parallel_backend(backend_name: str): """ **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization implemented by joblib. 
Args: backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib. Example usage: ```py with parallel_backend('spark'): dataset = load_dataset(..., num_proc=2) ``` """ ParallelBackendConfig.backend_name = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: ParallelBackendConfig.backend_name = None
datasets/src/datasets/parallel/parallel.py/0
{ "file_path": "datasets/src/datasets/parallel/parallel.py", "repo_id": "datasets", "token_count": 1700 }
69
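The contiguous split arithmetic in `_map_with_multiprocessing_pool` above is easy to misread, so this standalone sketch replays it for 10 items over 3 processes.

```python
# The first `mod` processes receive one extra element, so slice lengths
# differ by at most one and the slices tile the input contiguously.
iterable = list(range(10))
num_proc = 3
div, mod = len(iterable) // num_proc, len(iterable) % num_proc  # 3, 1

splits = []
for index in range(num_proc):
    start = div * index + min(index, mod)
    end = start + div + (1 if index < mod else 0)
    splits.append(iterable[start:end])

assert splits == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert sum(len(s) for s in splits) == len(iterable)
```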
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extends `dill` to support pickling more types and produce more consistent dumps.""" import os import sys from io import BytesIO from types import CodeType, FunctionType import dill from packaging import version from .. import config class Pickler(dill.Pickler): dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy()) _legacy_no_dict_keys_sorting = False def save(self, obj, save_persistent_id=True): obj_type = type(obj) if obj_type not in self.dispatch: if "regex" in sys.modules: import regex # type: ignore if obj_type is regex.Pattern: pklregister(obj_type)(_save_regexPattern) if "spacy" in sys.modules: import spacy # type: ignore if issubclass(obj_type, spacy.Language): pklregister(obj_type)(_save_spacyLanguage) if "tiktoken" in sys.modules: import tiktoken # type: ignore if obj_type is tiktoken.Encoding: pklregister(obj_type)(_save_tiktokenEncoding) if "torch" in sys.modules: import torch # type: ignore if issubclass(obj_type, torch.Tensor): pklregister(obj_type)(_save_torchTensor) if obj_type is torch.Generator: pklregister(obj_type)(_save_torchGenerator) # Unwrap `torch.compile`-ed modules if issubclass(obj_type, torch.nn.Module): obj = getattr(obj, "_orig_mod", obj) if "transformers" in sys.modules: import transformers # type: ignore if issubclass(obj_type, transformers.PreTrainedTokenizerBase): pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase) # Unwrap `torch.compile`-ed functions if obj_type is FunctionType: obj = getattr(obj, "_torchdynamo_orig_callable", obj) dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id) def _batch_setitems(self, items): if self._legacy_no_dict_keys_sorting: return super()._batch_setitems(items) # Ignore the order of keys in a dict try: # Faster, but fails for unorderable elements items = sorted(items) except Exception: # TypeError, decimal.InvalidOperation, etc. 
from datasets.fingerprint import Hasher items = sorted(items, key=lambda x: Hasher.hash(x[0])) dill.Pickler._batch_setitems(self, items) def memoize(self, obj): # Don't memoize strings since two identical strings can have different Python ids if type(obj) is not str: # noqa: E721 dill.Pickler.memoize(self, obj) def pklregister(t): """Register a custom reducer for the type.""" def proxy(func): Pickler.dispatch[t] = func return func return proxy def dump(obj, file): """Pickle an object to a file.""" Pickler(file, recurse=True).dump(obj) def dumps(obj): """Pickle an object to a string.""" file = BytesIO() dump(obj, file) return file.getvalue() if config.DILL_VERSION < version.parse("0.3.6"): def log(pickler, msg): dill._dill.log.info(msg) elif config.DILL_VERSION.release[:3] in [ version.parse("0.3.6").release, version.parse("0.3.7").release, version.parse("0.3.8").release, ]: def log(pickler, msg): dill._dill.logger.trace(pickler, msg) @pklregister(set) def _save_set(pickler, obj): log(pickler, f"Se: {obj}") try: # Faster, but fails for unorderable elements args = (sorted(obj),) except Exception: # TypeError, decimal.InvalidOperation, etc. from datasets.fingerprint import Hasher args = (sorted(obj, key=Hasher.hash),) pickler.save_reduce(set, args, obj=obj) log(pickler, "# Se") def _save_regexPattern(pickler, obj): import regex # type: ignore log(pickler, f"Re: {obj}") args = (obj.pattern, obj.flags) pickler.save_reduce(regex.compile, args, obj=obj) log(pickler, "# Re") def _save_tiktokenEncoding(pickler, obj): import tiktoken # type: ignore log(pickler, f"Enc: {obj}") args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens) pickler.save_reduce(tiktoken.Encoding, args, obj=obj) log(pickler, "# Enc") def _save_torchTensor(pickler, obj): import torch # type: ignore # `torch.from_numpy` is not picklable in `torch>=1.11.0` def create_torchTensor(np_array): return torch.from_numpy(np_array) log(pickler, f"To: {obj}") args = (obj.detach().cpu().numpy(),) pickler.save_reduce(create_torchTensor, args, obj=obj) log(pickler, "# To") def _save_torchGenerator(pickler, obj): import torch # type: ignore def create_torchGenerator(state): generator = torch.Generator() generator.set_state(state) return generator log(pickler, f"Ge: {obj}") args = (obj.get_state(),) pickler.save_reduce(create_torchGenerator, args, obj=obj) log(pickler, "# Ge") def _save_spacyLanguage(pickler, obj): import spacy # type: ignore def create_spacyLanguage(config, bytes): lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"]) lang_inst = lang_cls.from_config(config) return lang_inst.from_bytes(bytes) log(pickler, f"Sp: {obj}") args = (obj.config, obj.to_bytes()) pickler.save_reduce(create_spacyLanguage, args, obj=obj) log(pickler, "# Sp") def _save_transformersPreTrainedTokenizerBase(pickler, obj): log(pickler, f"Tok: {obj}") # Ignore the `cache` attribute state = obj.__dict__ if "cache" in state and isinstance(state["cache"], dict): state["cache"] = {} pickler.save_reduce(type(obj), (), state=state, obj=obj) log(pickler, "# Tok") if config.DILL_VERSION < version.parse("0.3.6"): @pklregister(CodeType) def _save_code(pickler, obj): """ From dill._dill.save_code This is a modified version that removes the origin (filename + line no.) of functions created in notebooks or shells for example. """ dill._dill.log.info(f"Co: {obj}") # The filename of a function is the .py file where it is defined. 
# Filenames of functions created in notebooks or shells start with '<' # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell # Filenames of functions created in ipykernel the filename # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" # Moreover lambda functions have a special name: '<lambda>' # ex: (lambda x: x).__code__.co_name == "<lambda>" # True # # For the hashing mechanism we ignore where the function has been defined # More specifically: # - we ignore the filename of special functions (filename starts with '<') # - we always ignore the line number # - we only use the base name of the file instead of the whole path, # to be robust in case a script is moved for example. # # Only those two lines are different from the original implementation: co_filename = ( "" if obj.co_filename.startswith("<") or ( len(obj.co_filename.split(os.path.sep)) > 1 and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") ) or obj.co_name == "<lambda>" else os.path.basename(obj.co_filename) ) co_firstlineno = 1 # The rest is the same as in the original dill implementation if dill._dill.PY3: if hasattr(obj, "co_posonlyargcount"): args = ( obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, obj.co_name, co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) pickler.save_reduce(CodeType, args, obj=obj) dill._dill.log.info("# Co") return elif config.DILL_VERSION.release[:3] in [ version.parse("0.3.6").release, version.parse("0.3.7").release, version.parse("0.3.8").release, ]: # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104 @pklregister(CodeType) def save_code(pickler, obj): dill._dill.logger.trace(pickler, "Co: %s", obj) ############################################################################################################ # Modification here for huggingface/datasets # The filename of a function is the .py file where it is defined. # Filenames of functions created in notebooks or shells start with '<' # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell # Filenames of functions created in ipykernel the filename # look like f"{tempdir}/ipykernel_{id1}/{id2}.py" # Moreover lambda functions have a special name: '<lambda>' # ex: (lambda x: x).__code__.co_name == "<lambda>" # True # # For the hashing mechanism we ignore where the function has been defined # More specifically: # - we ignore the filename of special functions (filename starts with '<') # - we always ignore the line number # - we only use the base name of the file instead of the whole path, # to be robust in case a script is moved for example. 
# # Only those two lines are different from the original implementation: co_filename = ( "" if obj.co_filename.startswith("<") or ( len(obj.co_filename.split(os.path.sep)) > 1 and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_") ) or obj.co_name == "<lambda>" else os.path.basename(obj.co_filename) ) co_firstlineno = 1 # The rest is the same as in the original dill implementation, except for the replacements: # - obj.co_filename => co_filename # - obj.co_firstlineno => co_firstlineno ############################################################################################################ if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args) args = ( obj.co_lnotab, # for < python 3.10 [not counted in args] obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, obj.co_qualname, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_endlinetable, obj.co_columntable, obj.co_exceptiontable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args) args = ( obj.co_lnotab, # for < python 3.10 [not counted in args] obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, obj.co_qualname, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_exceptiontable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_linetable"): # python 3.10 (16 args) args = ( obj.co_lnotab, # for < python 3.10 [not counted in args] obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_linetable, obj.co_freevars, obj.co_cellvars, ) elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args) args = ( obj.co_argcount, obj.co_posonlyargcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) else: # python 3.7 (15 args) args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, co_filename, # Modification for huggingface/datasets ############################################ obj.co_name, co_firstlineno, # Modification for huggingface/datasets ######################################### obj.co_lnotab, obj.co_freevars, obj.co_cellvars, ) pickler.save_reduce(dill._dill._create_code, args, obj=obj) dill._dill.logger.trace(pickler, "# Co") return
datasets/src/datasets/utils/_dill.py/0
{ "file_path": "datasets/src/datasets/utils/_dill.py", "repo_id": "datasets", "token_count": 8380 }
70
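The comments in the patched `save_code` above explain why function origins are stripped: it keeps content-based fingerprints stable across sessions. A minimal sketch of the effect, assuming the `datasets` package is installed (`datasets.fingerprint.Hasher` serializes objects with the custom `Pickler` defined above):

# Minimal sketch, assuming `datasets` is installed.
from datasets.fingerprint import Hasher

# Sets are reduced via sorted(...), so insertion order never changes the hash.
assert Hasher.hash({"a", "b", "c"}) == Hasher.hash({"c", "b", "a"})

# Lambdas have co_name == "<lambda>", so the patched save_code drops their
# filename and line number; identical lambdas defined in different places
# should therefore hash alike.
assert Hasher.hash(lambda x: x + 1) == Hasher.hash(lambda x: x + 1)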
# loading package files: https://stackoverflow.com/a/20885799
import importlib.resources as pkg_resources
import logging
from pathlib import Path
from typing import Any, List, Tuple

import yaml

from . import resources
from .deprecation_utils import deprecated


BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils"
this_url = f"{BASE_REF_URL}/{__file__}"
logger = logging.getLogger(__name__)


def load_yaml_resource(resource: str) -> Tuple[Any, str]:
    content = pkg_resources.read_text(resources, resource)
    return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}"


readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml")

FILLER_TEXT = [
    "[Needs More Information]",
    "[More Information Needed]",
    "(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)",
]

# Dictionary representation of section/readme, error_list, warning_list
ReadmeValidatorOutput = Tuple[dict, List[str], List[str]]


class Section:
    def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False):
        self.name = name
        self.level = level
        self.lines = lines
        self.text = ""
        self.is_empty_text = True
        self.content = {}
        self.parsing_error_list = []
        self.parsing_warning_list = []
        if self.lines is not None:
            self.parse(suppress_parsing_errors=suppress_parsing_errors)

    def parse(self, suppress_parsing_errors: bool = False):
        current_sub_level = ""
        current_lines = []
        code_start = False
        for line in self.lines:
            if line.strip(" \n") == "":
                continue
            elif line.strip(" \n")[:3] == "```":
                code_start = not code_start
            elif line.split()[0] == self.level + "#" and not code_start:
                if current_sub_level != "":
                    self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
                    current_lines = []
                else:
                    if current_lines != []:
                        self.text += "".join(current_lines).strip()
                        if self.text != "" and self.text not in FILLER_TEXT:
                            self.is_empty_text = False
                        current_lines = []

                current_sub_level = " ".join(line.split()[1:]).strip(" \n")
            else:
                current_lines.append(line)
        else:
            if current_sub_level != "":
                if current_sub_level in self.content:
                    self.parsing_error_list.append(
                        f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections."
                    )
                self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
            else:
                if current_lines != []:
                    self.text += "".join(current_lines).strip()
                    if self.text != "" and self.text not in FILLER_TEXT:
                        self.is_empty_text = False

        if self.level == "" and not suppress_parsing_errors:
            if self.parsing_error_list != [] or self.parsing_warning_list != []:
                errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list)
                error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors
                raise ValueError(error_string)

    def validate(self, structure: dict) -> ReadmeValidatorOutput:
        """Validates a Section class object recursively using the structure provided as a dictionary.

        Args:
            structure (:obj: `dict`): The dictionary representing the expected structure.

        Returns:
            :obj: `ReadmeValidatorOutput`: The dictionary representation of the section, the errors, and the warnings.
""" # Header text validation error_list = [] warning_list = [] if structure["allow_empty"] is False: # If content is expected if self.is_empty_text and self.content == {}: # If no content is found, mention it in the error_list error_list.append(f"Expected some content in section `{self.name}` but it is empty.") if structure["allow_empty_text"] is False: # If some text is expected if self.is_empty_text: # If no text is found, mention it in the error_list error_list.append( f"Expected some text in section `{self.name}` but it is empty (text in subsections are ignored)." ) # Subsections Validation if structure["subsections"] is not None: # If subsections are expected if self.content == {}: # If no subsections are present values = [subsection["name"] for subsection in structure["subsections"]] # Mention the expected values in the error_list error_list.append( f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'." ) else: # If some subsections are present structure_names = [subsection["name"] for subsection in structure["subsections"]] has_missing_subsections = False for idx, name in enumerate(structure_names): if name not in self.content: # If the expected subsection is not present error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.") has_missing_subsections = True else: # If the subsection is present, validate subsection, return the result # and concat the errors from subsection to section error_list # Skip sublevel validation if current level is `###` if self.level == "###": continue else: _, subsec_error_list, subsec_warning_list = self.content[name].validate( structure["subsections"][idx] ) error_list += subsec_error_list warning_list += subsec_warning_list if has_missing_subsections: # we only allow to have extra subsections if all the other ones are here for name in self.content: if name not in structure_names: # If an extra subsection is present warning_list.append( f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown." 
                            )
        if error_list:
            # If there are errors, do not return the dictionary as it is invalid
            return {}, error_list, warning_list
        else:
            return self.to_dict(), error_list, warning_list

    def to_dict(self) -> dict:
        """Returns the dictionary representation of a section."""
        return {
            "name": self.name,
            "text": self.text,
            "is_empty_text": self.is_empty_text,
            "subsections": [value.to_dict() for value in self.content.values()],
        }


@deprecated("Use `huggingface_hub.DatasetCard` instead.")
class ReadMe(Section):  # Level 0
    def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False):
        super().__init__(name=name, level="")  # Not passing lines here, as we need to use the child class's parse
        self.structure = structure
        self.yaml_tags_line_count = -2
        self.tag_count = 0
        self.lines = lines
        if self.lines is not None:
            self.parse(suppress_parsing_errors=suppress_parsing_errors)

    def validate(self):
        if self.structure is None:
            content, error_list, warning_list = self._validate(readme_structure)
        else:
            content, error_list, warning_list = self._validate(self.structure)
        if error_list != [] or warning_list != []:
            errors = "\n".join(["-\t" + x for x in error_list + warning_list])
            error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors
            raise ValueError(error_string)

    @classmethod
    def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False):
        with open(path, encoding="utf-8") as f:
            lines = f.readlines()
        return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors)

    @classmethod
    def from_string(
        cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False
    ):
        lines = string.split("\n")
        return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors)

    def parse(self, suppress_parsing_errors: bool = False):
        # Skip the YAML tags
        line_count = 0
        for line in self.lines:
            self.yaml_tags_line_count += 1
            if line.strip(" \n") == "---":
                self.tag_count += 1
                if self.tag_count == 2:
                    break
            line_count += 1
        if self.tag_count == 2:
            self.lines = self.lines[line_count + 1 :]  # Drop everything up to and including the closing "---" marker.
        else:
            self.lines = self.lines[self.tag_count :]
        super().parse(suppress_parsing_errors=suppress_parsing_errors)

    def __str__(self):
        """Returns the string of the dictionary representation of the ReadMe."""
        return str(self.to_dict())

    def _validate(self, readme_structure):
        error_list = []
        warning_list = []
        if self.yaml_tags_line_count == 0:
            warning_list.append("Empty YAML markers are present in the README.")
        elif self.tag_count == 0:
            warning_list.append("No YAML markers are present in the README.")
        elif self.tag_count == 1:
            warning_list.append("Only the start of the YAML tags is present in the README.")
        # Check how many first level sections are present.
        num_first_level_keys = len(self.content.keys())
        if num_first_level_keys > 1:
            # If more than one, add to the error list, continue
            error_list.append(
                f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README."
            )
        elif num_first_level_keys < 1:
            # If fewer than one, append an error.
            error_list.append(
                "The README has no first-level headings. One heading is expected. Skipping further validation for this README."
            )
        else:
            # If exactly one
            start_key = list(self.content.keys())[0]  # Get the key
            if start_key.startswith("Dataset Card for"):  # Check for the correct start
                # If the start is correct, validate all the sections
                _, sec_error_list, sec_warning_list = self.content[start_key].validate(
                    readme_structure["subsections"][0]
                )
                error_list += sec_error_list
                warning_list += sec_warning_list
            else:
                # If not found, append an error
                error_list.append(
                    "No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
                )
        if error_list:
            # If there are errors, do not return the dictionary as it is invalid
            return {}, error_list, warning_list
        else:
            return self.to_dict(), error_list, warning_list


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(description="Validate the content (excluding YAML tags) of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    readme = ReadMe.from_readme(readme_filepath)
datasets/src/datasets/utils/readme.py/0
{ "file_path": "datasets/src/datasets/utils/readme.py", "repo_id": "datasets", "token_count": 5742 }
71
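A minimal sketch of driving the (deprecated) validator above. The tiny `structure` dict is a made-up stand-in for the shipped `readme_structure.yaml`, and the card text is invented:

from datasets.utils.readme import ReadMe

# Hypothetical minimal structure mirroring the shape of readme_structure.yaml.
structure = {
    "name": "root",
    "allow_empty": True,
    "allow_empty_text": True,
    "subsections": [
        {"name": "Dataset Card for X", "allow_empty": False, "allow_empty_text": False, "subsections": None}
    ],
}

card = "---\nlanguage: en\n---\n# Dataset Card for X\nSome description.\n"

readme = ReadMe.from_string(card, structure)
readme.validate()  # raises ValueError with a bulleted list of issues if the card is malformed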
# Metric Card for *Current Metric*

***Metric Card Instructions:*** *Copy this file into the relevant metric folder, then fill it out and save it as README.md. Feel free to take a look at existing metric cards if you'd like examples.*

## Metric Description
*Give a brief overview of this metric.*

## How to Use
*Give a general statement of how to use the metric.*

*Provide the simplest possible example of using the metric.*

### Inputs
*List all input arguments in the format below*
- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*

### Output Values
*Explain what this metric outputs (e.g. a single score, a list of scores)*

*Give an example of what the metric output looks like.*

*State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*

#### Values from Popular Papers
*Give examples, preferably with links, to papers that have reported this metric, along with the values they have reported.*

### Examples
*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*

## Limitations and Bias
*Note any known limitations or biases that the metric has, with links and references if possible.*

## Citation
*Cite the source where this metric was introduced.*

## Further References
*Add any useful further references.*
datasets/templates/metric_card_template.md/0
{ "file_path": "datasets/templates/metric_card_template.md", "repo_id": "datasets", "token_count": 397 }
72
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, IterableDatasetDict, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.info import DatasetInfo from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _check_parquet_dataset(dataset, expected_features): assert isinstance(dataset, Dataset) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() _check_parquet_dataset(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_dataset_from_parquet_features(features, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read() _check_parquet_dataset(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_dataset_from_parquet_split(split, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read() _check_parquet_dataset(dataset, expected_features) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type", [str, list]) def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path): if issubclass(path_type, str): path = parquet_path elif issubclass(path_type, list): path = [parquet_path] cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read() _check_parquet_dataset(dataset, expected_features) def test_parquet_read_geoparquet(geoparquet_path, tmp_path): cache_dir = tmp_path / "cache" dataset = ParquetDatasetReader(path_or_paths=geoparquet_path, cache_dir=cache_dir).read() expected_features = { "pop_est": "float64", "continent": "string", "name": "string", "gdp_md_est": "int64", "geometry": "binary", } assert isinstance(dataset, Dataset) assert dataset.num_rows == 5 assert dataset.num_columns == 6 assert dataset.column_names == ["pop_est", "continent", "name", "iso_a3", "gdp_md_est", "geometry"] for feature, expected_dtype in expected_features.items(): 
assert dataset.features[feature].dtype == expected_dtype def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, (DatasetDict, IterableDatasetDict)) for split in splits: dataset = dataset_dict[split] assert len(list(dataset)) == 4 assert dataset.features is not None assert set(dataset.features) == set(expected_features) for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = ParquetDatasetReader( {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read() _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_parquet_datasetdict_reader_features(streaming, features, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = ParquetDatasetReader( {"train": parquet_path}, features=features, cache_dir=cache_dir, streaming=streaming ).read() _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("columns", [None, ["col_1"]]) @pytest.mark.parametrize("pass_features", [False, True]) @pytest.mark.parametrize("pass_info", [False, True]) def test_parquet_datasetdict_reader_columns(streaming, columns, pass_features, pass_info, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} info = ( DatasetInfo(features=Features({feature: Value(dtype) for feature, dtype in default_expected_features.items()})) if pass_info else None ) expected_features = ( {col: default_expected_features[col] for col in columns} if columns else default_expected_features ) features = ( Features({feature: Value(dtype) for feature, dtype in expected_features.items()}) if pass_features else None ) dataset = ParquetDatasetReader( {"train": parquet_path}, columns=columns, features=features, info=info, cache_dir=cache_dir, streaming=streaming, ).read() _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path): if split: path = {split: parquet_path} else: split = "train" path = {"train": parquet_path, "test": parquet_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read() _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys())) assert 
all(dataset[split].split == split for split in path.keys()) def test_parquet_write(dataset, tmp_path): writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet") assert writer.write() > 0 pf = pq.ParquetFile(tmp_path / "foo.parquet") output_table = pf.read() assert dataset.data.table == output_table def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path): image_path = str(shared_datadir / "test_image_rgb.jpg") data = {"image": [image_path]} features = Features({"image": Image()}) dataset = Dataset.from_dict(data, features=features) writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet") assert writer.write() > 0 reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet")) assert dataset.features == reloaded_dataset.features reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected", [ (Features({"foo": Value("int32")}), None), (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ], ) def test_get_writer_batch_size(feature, expected): assert get_writer_batch_size(feature) == expected
datasets/tests/io/test_parquet.py/0
{ "file_path": "datasets/tests/io/test_parquet.py", "repo_id": "datasets", "token_count": 3621 }
73
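A minimal sketch of the write/read round trip the tests above exercise; the file and cache directory names are illustrative:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})

# Write the dataset to a Parquet file; write() returns a positive count on success.
assert ParquetDatasetWriter(ds, "example.parquet").write() > 0

# Read it back (split defaults to "train"); pass features= to override dtypes.
reloaded = ParquetDatasetReader("example.parquet", cache_dir="./cache").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]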
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Nested dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # Both shards should exist on disk.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows, expected_num_examples) self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"])) self.assertTrue( os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")) ) del dset @require_beam def test_no_beam_options(self): with tempfile.TemporaryDirectory() as tmp_cache_dir: builder = DummyBeamDataset(cache_dir=tmp_cache_dir) self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare) @require_beam def test_nested_features(self): expected_num_examples = len(get_test_nested_examples()) with tempfile.TemporaryDirectory() as tmp_cache_dir: builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner") builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow") ) ) self.assertDictEqual( builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}) ) dset = builder.as_dataset() self.assertEqual(dset["train"].num_rows, expected_num_examples) self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples) self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1]) self.assertDictEqual( dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")) ) del dset
datasets/tests/test_beam.py/0
{ "file_path": "datasets/tests/test_beam.py", "repo_id": "datasets", "token_count": 3076 }
74
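A minimal sketch of the builder pattern the tests above exercise, assuming `datasets` and `apache-beam` are installed; the class name and cache directory are illustrative:

import datasets


class TinyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(features=datasets.Features({"content": datasets.Value("string")}))

    def _split_generators(self, dl_manager, pipeline):
        examples = [(i, {"content": c}) for i, c in enumerate(["foo", "bar"])]
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": examples})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


# Omitting beam_runner raises datasets.builder.MissingBeamOptions (tested above);
# DirectRunner executes the pipeline locally.
builder = TinyBeamDataset(cache_dir="./beam_cache", beam_runner="DirectRunner")
builder.download_and_prepare()
print(builder.as_dataset()["train"].num_rows)  # -> 2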
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def test_from_dir(files, tmp_path_factory): dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir") if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w") as f: f.write("---\ndataset_info:\n dataset_size: 42\n---") if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w") as f: f.write("") # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w") as f: f.write('{"default": {"dataset_size": 42}}') dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo): tmp_path = str(tmp_path) dataset_info.write_to_directory(tmp_path) reloaded = DatasetInfo.from_directory(tmp_path) assert dataset_info == reloaded assert os.path.exists(os.path.join(tmp_path, "dataset_info.json")) def test_dataset_info_to_yaml_dict(): dataset_info = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, ) dataset_info_yaml_dict = dataset_info._to_yaml_dict() assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str)) dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict) reloaded = yaml.safe_load(dataset_info_yaml) assert dataset_info_yaml_dict == reloaded def test_dataset_info_to_yaml_dict_empty(): dataset_info = DatasetInfo() dataset_info_yaml_dict = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()}), DatasetInfosDict({"my_config_name": DatasetInfo()}), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42), "v2": DatasetInfo(dataset_size=1337), } ), ], ) def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict): tmp_path = str(tmp_path) dataset_infos_dict.write_to_directory(tmp_path) reloaded = DatasetInfosDict.from_directory(tmp_path) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): dataset_info.config_name = config_name # the yaml representation doesn't include 
fields like description or citation # so we just test that we can recover what we can from the yaml dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict()) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(tmp_path, "README.md")) @pytest.mark.parametrize( "dataset_info", [ None, DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, dataset_name="dataset_name", ), ], ) def test_from_merge_same_dataset_infos(dataset_info): num_elements = 3 if dataset_info is not None: dataset_info_list = [dataset_info.copy() for _ in range(num_elements)] else: dataset_info_list = [None] * num_elements dataset_info_merged = DatasetInfo.from_merge(dataset_info_list) if dataset_info is not None: assert dataset_info == dataset_info_merged else: assert DatasetInfo() == dataset_info_merged
datasets/tests/test_info.py/0
{ "file_path": "datasets/tests/test_info.py", "repo_id": "datasets", "token_count": 2685 }
75
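A minimal sketch of the YAML round trip tested above, using the same private helpers the tests call; the values are illustrative:

import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo

info = DatasetInfo(features=Features({"a": Value("int32")}), dataset_size=42)

# Only fields listed in DatasetInfo._INCLUDED_INFO_IN_YAML survive the YAML form;
# description, citation, etc. are dropped, as the comment in the test notes.
yaml_dict = info._to_yaml_dict()
restored = DatasetInfo._from_yaml_dict(yaml.safe_load(yaml.safe_dump(yaml_dict)))
assert restored.dataset_size == 42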
import json import os import re from pathlib import Path import pytest from fsspec.registry import _registry as _fsspec_registry from fsspec.spec import AbstractBufferedFile, AbstractFileSystem from datasets.download.download_config import DownloadConfig from datasets.download.streaming_download_manager import ( StreamingDownloadManager, _get_extraction_protocol, xbasename, xexists, xgetsize, xglob, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xPath, xrelpath, xsplit, xsplitext, xwalk, ) from datasets.filesystems import COMPRESSION_FILESYSTEMS from datasets.utils.hub import hf_hub_url from .utils import require_lz4, require_zstandard, slow TEST_URL = "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/raw/main/some_text.txt" TEST_URL_CONTENT = "foo\nbar\nfoobar" TEST_GG_DRIVE_FILENAME = "train.tsv" TEST_GG_DRIVE_URL = "https://drive.google.com/uc?export=download&id=17bOgBDc3hRCoPZ89EYtKDzK-yXAWat94" TEST_GG_DRIVE_GZIPPED_URL = "https://drive.google.com/uc?export=download&id=1Bt4Garpf0QLiwkJhHJzXaVa0I0H5Qhwz" TEST_GG_DRIVE_ZIPPED_URL = "https://drive.google.com/uc?export=download&id=1k92sUfpHxKq8PXWRr7Y5aNHXwOCNUmqh" TEST_GG_DRIVE_CONTENT = """\ pokemon_name, type Charmander, fire Squirtle, water Bulbasaur, grass""" class DummyTestFS(AbstractFileSystem): protocol = "mock" _file_class = AbstractBufferedFile _fs_contents = ( {"name": "top_level", "type": "directory"}, {"name": "top_level/second_level", "type": "directory"}, {"name": "top_level/second_level/date=2019-10-01", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-01/a.parquet", "type": "file", "size": 100, }, { "name": "top_level/second_level/date=2019-10-01/b.parquet", "type": "file", "size": 100, }, {"name": "top_level/second_level/date=2019-10-02", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-02/a.parquet", "type": "file", "size": 100, }, {"name": "top_level/second_level/date=2019-10-04", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-04/a.parquet", "type": "file", "size": 100, }, {"name": "misc", "type": "directory"}, {"name": "misc/foo.txt", "type": "file", "size": 100}, {"name": "glob_test", "type": "directory", "size": 0}, {"name": "glob_test/hat", "type": "directory", "size": 0}, {"name": "glob_test/hat/^foo.txt", "type": "file", "size": 100}, {"name": "glob_test/dollar", "type": "directory", "size": 0}, {"name": "glob_test/dollar/$foo.txt", "type": "file", "size": 100}, {"name": "glob_test/lbrace", "type": "directory", "size": 0}, {"name": "glob_test/lbrace/{foo.txt", "type": "file", "size": 100}, {"name": "glob_test/rbrace", "type": "directory", "size": 0}, {"name": "glob_test/rbrace/}foo.txt", "type": "file", "size": 100}, ) def __getitem__(self, name): for item in self._fs_contents: if item["name"] == name: return item raise IndexError(f"{name} not found!") def ls(self, path, detail=True, refresh=True, **kwargs): if kwargs.pop("strip_proto", True): path = self._strip_protocol(path) files = not refresh and self._ls_from_cache(path) if not files: files = [file for file in self._fs_contents if path == self._parent(file["name"])] files.sort(key=lambda file: file["name"]) self.dircache[path.rstrip("/")] = files if detail: return files return [file["name"] for file in files] def _open( self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ): return self._file_class( self, path, mode, block_size, autocommit, cache_options=cache_options, **kwargs, ) @pytest.fixture def mock_fsspec(): 
_fsspec_registry["mock"] = DummyTestFS yield del _fsspec_registry["mock"] def _readd_double_slash_removed_by_path(path_as_posix: str) -> str: """Path(...) on an url path like zip://file.txt::http://host.com/data.zip converts the :// to :/ This function readds the :// It handles cases like: - https://host.com/data.zip - C://data.zip - zip://file.txt::https://host.com/data.zip - zip://file.txt::/Users/username/data.zip - zip://file.txt::C://data.zip Args: path_as_posix (str): output of Path(...).as_posix() Returns: str: the url path with :// instead of :/ """ return re.sub("([A-z]:/)([A-z:])", r"\g<1>/\g<2>", path_as_posix) @pytest.mark.parametrize( "input_path, paths_to_join, expected_path", [ ( "https://host.com/archive.zip", ("file.txt",), "https://host.com/archive.zip/file.txt", ), ( "zip://::https://host.com/archive.zip", ("file.txt",), "zip://file.txt::https://host.com/archive.zip", ), ( "zip://folder::https://host.com/archive.zip", ("file.txt",), "zip://folder/file.txt::https://host.com/archive.zip", ), ( ".", ("file.txt",), os.path.join(".", "file.txt"), ), ( str(Path().resolve()), ("file.txt",), str((Path().resolve() / "file.txt")), ), ], ) def test_xjoin(input_path, paths_to_join, expected_path): output_path = xjoin(input_path, *paths_to_join) assert output_path == expected_path output_path = xPath(input_path).joinpath(*paths_to_join) assert output_path == xPath(expected_path) @pytest.mark.parametrize( "input_path, expected_path", [ (str(Path(__file__).resolve()), str(Path(__file__).resolve().parent)), ("https://host.com/archive.zip", "https://host.com"), ( "zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", ), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://folder::https://host.com/archive.zip", ), ], ) def test_xdirname(input_path, expected_path): from datasets.download.streaming_download_manager import xdirname output_path = xdirname(input_path) output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) @pytest.mark.parametrize( "input_path, exists", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), ], ) def test_xexists(input_path, exists, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xexists(input_path) is exists @pytest.mark.integration def test_xexists_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xexists(root_url + "data/text_data.txt", download_config=download_config) assert not xexists(root_url + "file_that_doesnt_exist.txt", download_config=download_config) @pytest.mark.parametrize( "input_path, expected_head_and_tail", [ ( str(Path(__file__).resolve()), (str(Path(__file__).resolve().parent), str(Path(__file__).resolve().name)), ), ("https://host.com/archive.zip", ("https://host.com", "archive.zip")), ("zip://file.txt::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "file.txt")), ("zip://folder::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "folder")), ("zip://::https://host.com/archive.zip", 
("zip://::https://host.com/archive.zip", "")), ], ) def test_xsplit(input_path, expected_head_and_tail): output_path, tail = xsplit(input_path) expected_path, expected_tail = expected_head_and_tail output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) assert output_path == expected_path assert tail == expected_tail @pytest.mark.parametrize( "input_path, expected_path_and_ext", [ ( str(Path(__file__).resolve()), (str(Path(__file__).resolve().with_suffix("")), str(Path(__file__).resolve().suffix)), ), ("https://host.com/archive.zip", ("https://host.com/archive", ".zip")), ("zip://file.txt::https://host.com/archive.zip", ("zip://file::https://host.com/archive.zip", ".txt")), ("zip://folder::https://host.com/archive.zip", ("zip://folder::https://host.com/archive.zip", "")), ("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")), ], ) def test_xsplitext(input_path, expected_path_and_ext): output_path, ext = xsplitext(input_path) expected_path, expected_ext = expected_path_and_ext output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) assert output_path == expected_path assert ext == expected_ext def test_xopen_local(text_path): with xopen(text_path, "r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert list(f) == list(expected_file) with xPath(text_path).open("r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert list(f) == list(expected_file) @pytest.mark.integration def test_xopen_remote(): with xopen(TEST_URL, "r", encoding="utf-8") as f: assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) with xPath(TEST_URL).open("r", encoding="utf-8") as f: assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) @pytest.mark.parametrize( "input_path, expected_paths", [ ("tmp_path", ["file1.txt", "file2.txt"]), ("mock://", ["glob_test", "misc", "top_level"]), ("mock://top_level", ["second_level"]), ("mock://top_level/second_level/date=2019-10-01", ["a.parquet", "b.parquet"]), ], ) def test_xlistdir(input_path, expected_paths, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) for file in ["file1.txt", "file2.txt"]: (tmp_path / file).touch() output_paths = sorted(xlistdir(input_path)) assert output_paths == expected_paths @pytest.mark.integration def test_xlistdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(xlistdir("zip://::" + root_url, download_config=download_config)) == 1 assert len(xlistdir("zip://main_dir::" + root_url, download_config=download_config)) == 2 with pytest.raises(FileNotFoundError): xlistdir("zip://qwertyuiop::" + root_url, download_config=download_config) with pytest.raises(FileNotFoundError): xlistdir(root_url, download_config=download_config) @pytest.mark.parametrize( "input_path, isdir", [ ("tmp_path", True), ("tmp_path/file.txt", False), ("mock://", True), ("mock://top_level", True), ("mock://dir_that_doesnt_exist", False), ], ) def test_xisdir(input_path, isdir, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / 
"file.txt").touch() assert xisdir(input_path) == isdir @pytest.mark.integration def test_xisdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert xisdir("zip://::" + root_url, download_config=download_config) is True assert xisdir("zip://main_dir::" + root_url, download_config=download_config) is True assert xisdir("zip://qwertyuiop::" + root_url, download_config=download_config) is False assert xisdir(root_url, download_config=download_config) is False @pytest.mark.parametrize( "input_path, isfile", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ], ) def test_xisfile(input_path, isfile, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xisfile(input_path) == isfile @pytest.mark.integration def test_xisfile_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xisfile(root_url + "data/text_data.txt", download_config=download_config) is True assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False @pytest.mark.parametrize( "input_path, size", [ ("tmp_path/file.txt", 100), ("mock://", 0), ("mock://top_level/second_level/date=2019-10-01/a.parquet", 100), ], ) def test_xgetsize(input_path, size, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() (tmp_path / "file.txt").write_bytes(b"x" * 100) assert xgetsize(input_path) == size @pytest.mark.integration def test_xgetsize_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xgetsize(root_url + "data/text_data.txt", download_config=download_config) == 39 with pytest.raises(FileNotFoundError): xgetsize(root_url + "qwertyuiop", download_config=download_config) @pytest.mark.parametrize( "input_path, expected_paths", [ ("tmp_path/*.txt", ["file1.txt", "file2.txt"]), ("mock://*", ["mock://glob_test", "mock://misc", "mock://top_level"]), ("mock://top_*", ["mock://top_level"]), ( "mock://top_level/second_level/date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level/second_level/date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xglob(input_path, expected_paths, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) expected_paths = [str(tmp_path / file) for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() output_paths = sorted(xglob(input_path)) assert output_paths == expected_paths @pytest.mark.integration def test_xglob_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = 
hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(xglob("zip://**::" + root_url, download_config=download_config)) == 3 assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0 @pytest.mark.parametrize( "input_path, expected_outputs", [ ("tmp_path", [("", [], ["file1.txt", "file2.txt", "README.md"])]), ( "mock://top_level/second_level", [ ("mock://top_level/second_level", ["date=2019-10-01", "date=2019-10-02", "date=2019-10-04"], []), ("mock://top_level/second_level/date=2019-10-01", [], ["a.parquet", "b.parquet"]), ("mock://top_level/second_level/date=2019-10-02", [], ["a.parquet"]), ("mock://top_level/second_level/date=2019-10-04", [], ["a.parquet"]), ], ), ], ) def test_xwalk(input_path, expected_outputs, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) expected_outputs = sorted( [ (str(tmp_path / dirpath).rstrip("/"), sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in expected_outputs ] ) for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() outputs = sorted(xwalk(input_path)) outputs = [(dirpath, sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in outputs] assert outputs == expected_outputs @pytest.mark.integration def test_xwalk_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(list(xwalk("zip://::" + root_url, download_config=download_config))) == 2 assert len(list(xwalk("zip://main_dir::" + root_url, download_config=download_config))) == 1 assert len(list(xwalk("zip://qwertyuiop::" + root_url, download_config=download_config))) == 0 @pytest.mark.parametrize( "input_path, start_path, expected_path", [ ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1", "dir2/file.txt".replace("/", os.path.sep)), ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1/dir2".replace("/", os.path.sep), "file.txt"), ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "file.txt"), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "folder/file.txt", ), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://folder::https://host.com/archive.zip", "file.txt", ), ], ) def test_xrelpath(input_path, start_path, expected_path): output_path = xrelpath(input_path, start=start_path) assert output_path == expected_path class TestxPath: @pytest.mark.parametrize( "input_path", [ "https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip", "file.txt", str(Path().resolve() / "file.txt"), ], ) def test_xpath_str(self, input_path): assert str(xPath(input_path)) == input_path @pytest.mark.parametrize( "input_path, expected_path", [ ("https://host.com/archive.zip", "https://host.com/archive.zip"), ("zip://file.txt::https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip"), ("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip"), ("file.txt", "file.txt"), (str(Path().resolve() / "file.txt"), (Path().resolve() / "file.txt").as_posix()), ], ) def test_xpath_as_posix(self, input_path, expected_path): assert xPath(input_path).as_posix() == expected_path @pytest.mark.parametrize( 
"input_path, exists", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), ], ) def test_xpath_exists(self, input_path, exists, tmp_path, mock_fsspec): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xexists(input_path) is exists @pytest.mark.parametrize( "input_path, pattern, expected_paths", [ ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), ("mock://", "*", ["mock://glob_test", "mock://misc", "mock://top_level"]), ("mock://", "top_*", ["mock://top_level"]), ( "mock://top_level/second_level", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level/second_level", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xpath_glob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec): if input_path == "tmp_path": input_path = tmp_path expected_paths = [tmp_path / file for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() else: expected_paths = [Path(file) for file in expected_paths] output_paths = sorted(xPath(input_path).glob(pattern)) assert output_paths == expected_paths @pytest.mark.parametrize( "input_path, pattern, expected_paths", [ ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), ( "mock://", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ( "mock://top_level", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xpath_rglob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec): if input_path == "tmp_path": input_path = tmp_path dir_path = tmp_path / "dir" dir_path.mkdir() expected_paths = [dir_path / file for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (dir_path / file).touch() else: expected_paths = [Path(file) for file in expected_paths] output_paths = sorted(xPath(input_path).rglob(pattern)) assert output_paths == expected_paths @pytest.mark.parametrize( "input_path, expected_path", [ ("https://host.com/archive.zip", "https://host.com"), ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip"), ("zip://dir/file.txt::https://host.com/archive.zip", 
"zip://dir::https://host.com/archive.zip"), ("file.txt", ""), (str(Path().resolve() / "file.txt"), str(Path().resolve())), ], ) def test_xpath_parent(self, input_path, expected_path): assert xPath(input_path).parent == xPath(expected_path) @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", "archive.zip"), ("zip://file.txt::https://host.com/archive.zip", "file.txt"), ("zip://dir/file.txt::https://host.com/archive.zip", "file.txt"), ("file.txt", "file.txt"), (str(Path().resolve() / "file.txt"), "file.txt"), ], ) def test_xpath_name(self, input_path, expected): assert xPath(input_path).name == expected @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", "archive"), ("zip://file.txt::https://host.com/archive.zip", "file"), ("zip://dir/file.txt::https://host.com/archive.zip", "file"), ("file.txt", "file"), (str(Path().resolve() / "file.txt"), "file"), ], ) def test_xpath_stem(self, input_path, expected): assert xPath(input_path).stem == expected @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", ".zip"), ("zip://file.txt::https://host.com/archive.zip", ".txt"), ("zip://dir/file.txt::https://host.com/archive.zip", ".txt"), ("file.txt", ".txt"), (str(Path().resolve() / "file.txt"), ".txt"), ], ) def test_xpath_suffix(self, input_path, expected): assert xPath(input_path).suffix == expected @pytest.mark.parametrize( "input_path, suffix, expected", [ ("https://host.com/archive.zip", ".ann", "https://host.com/archive.ann"), ("zip://file.txt::https://host.com/archive.zip", ".ann", "zip://file.ann::https://host.com/archive.zip"), ( "zip://dir/file.txt::https://host.com/archive.zip", ".ann", "zip://dir/file.ann::https://host.com/archive.zip", ), ("file.txt", ".ann", "file.ann"), (str(Path().resolve() / "file.txt"), ".ann", str(Path().resolve() / "file.ann")), ], ) def test_xpath_with_suffix(self, input_path, suffix, expected): assert xPath(input_path).with_suffix(suffix) == xPath(expected) @pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"]) def test_streaming_dl_manager_download_dummy_path(urlpath): dl_manager = StreamingDownloadManager() assert dl_manager.download(urlpath) == urlpath def test_streaming_dl_manager_download(text_path): dl_manager = StreamingDownloadManager() out = dl_manager.download(text_path) assert out == text_path with xopen(out, encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"]) def test_streaming_dl_manager_download_and_extract_no_extraction(urlpath): dl_manager = StreamingDownloadManager() assert dl_manager.download_and_extract(urlpath) == urlpath def test_streaming_dl_manager_extract(text_gz_path, text_path): dl_manager = StreamingDownloadManager() output_path = dl_manager.extract(text_gz_path) path = os.path.basename(text_gz_path) path = path[: path.rindex(".")] assert output_path == f"gzip://{path}::{text_gz_path}" fsspec_open_file = xopen(output_path, encoding="utf-8") with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() def test_streaming_dl_manager_download_and_extract_with_extraction(text_gz_path, text_path): dl_manager = StreamingDownloadManager() output_path = dl_manager.download_and_extract(text_gz_path) path = os.path.basename(text_gz_path) path = path[: path.rindex(".")] assert output_path == 
f"gzip://{path}::{text_gz_path}" fsspec_open_file = xopen(output_path, encoding="utf-8") with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize( "input_path, filename, expected_path", [("https://domain.org/archive.zip", "filename.jsonl", "zip://filename.jsonl::https://domain.org/archive.zip")], ) def test_streaming_dl_manager_download_and_extract_with_join(input_path, filename, expected_path): dl_manager = StreamingDownloadManager() extracted_path = dl_manager.download_and_extract(input_path) output_path = xjoin(extracted_path, filename) assert output_path == expected_path @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_streaming_dl_manager_extract_all_supported_single_file_compression_types( compression_fs_class, gz_file, xz_file, zstd_file, bz2_file, lz4_file, text_file ): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = input_paths[compression_fs_class.protocol] if input_path is None: reason = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lz4.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) dl_manager = StreamingDownloadManager() output_path = dl_manager.extract(input_path) path = os.path.basename(input_path) path = path[: path.rindex(".")] assert output_path == f"{compression_fs_class.protocol}://{path}::{input_path}" fsspec_open_file = xopen(output_path, encoding="utf-8") with fsspec_open_file as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize( "urlpath, expected_protocol", [ ("zip://train-00000.json.gz::https://foo.bar/data.zip", "gzip"), ("https://foo.bar/train.json.gz?dl=1", "gzip"), ("http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip", "zip"), ("https://github.com/user/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true", "zip"), ("https://github.com/user/repo/blob/master/data/morph_train.tsv?raw=true", None), ("https://repo.org/bitstream/handle/20.500.12185/346/annotated_corpus.zip?sequence=3&isAllowed=y", "zip"), ("https://zenodo.org/record/2787612/files/SICK.zip?download=1", "zip"), ], ) def test_streaming_dl_manager_get_extraction_protocol(urlpath, expected_protocol): assert _get_extraction_protocol(urlpath) == expected_protocol @pytest.mark.parametrize( "urlpath, expected_protocol", [ (TEST_GG_DRIVE_GZIPPED_URL, "gzip"), (TEST_GG_DRIVE_ZIPPED_URL, "zip"), ], ) @slow # otherwise it spams Google Drive and the CI gets banned def test_streaming_dl_manager_get_extraction_protocol_gg_drive(urlpath, expected_protocol): assert _get_extraction_protocol(urlpath) == expected_protocol @pytest.mark.parametrize( "urlpath", [ "zip://train-00000.tar.gz::https://foo.bar/data.zip", "https://foo.bar/train.tar.gz", "https://foo.bar/train.tgz", "https://foo.bar/train.tar", ], ) def test_streaming_dl_manager_extract_throws(urlpath): with pytest.raises(NotImplementedError): _ = StreamingDownloadManager().extract(urlpath) @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive(): with xopen(TEST_GG_DRIVE_URL) as f: assert f.read() == TEST_GG_DRIVE_CONTENT @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive_no_extract(): urlpath = 
StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_URL) with xopen(urlpath) as f: assert f.read() == TEST_GG_DRIVE_CONTENT @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive_gzipped(): urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_GZIPPED_URL) with xopen(urlpath) as f: assert f.read() == TEST_GG_DRIVE_CONTENT @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive_zipped(): urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_ZIPPED_URL) all_files = list(xglob(xjoin(urlpath, "*"))) assert len(all_files) == 1 assert xbasename(all_files[0]) == TEST_GG_DRIVE_FILENAME with xopen(all_files[0]) as f: assert f.read() == TEST_GG_DRIVE_CONTENT def _test_jsonl(path, file): assert path.endswith(".jsonl") for num_items, line in enumerate(file, start=1): item = json.loads(line.decode("utf-8")) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"]) def test_iter_archive_path(archive_jsonl, request): archive_jsonl_path = request.getfixturevalue(archive_jsonl) dl_manager = StreamingDownloadManager() archive_iterable = dl_manager.iter_archive(archive_jsonl_path) num_jsonl = 0 for num_jsonl, (path, file) in enumerate(archive_iterable, start=1): _test_jsonl(path, file) assert num_jsonl == 2 # do it twice to make sure it's reset correctly num_jsonl = 0 for num_jsonl, (path, file) in enumerate(archive_iterable, start=1): _test_jsonl(path, file) assert num_jsonl == 2 @pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"]) def test_iter_archive_file(archive_nested_jsonl, request): archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl) dl_manager = StreamingDownloadManager() files_iterable = dl_manager.iter_archive(archive_nested_jsonl_path) num_tar, num_jsonl = 0, 0 for num_tar, (path, file) in enumerate(files_iterable, start=1): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): _test_jsonl(subpath, subfile) assert num_tar == 1 assert num_jsonl == 2 # do it twice to make sure it's reset correctly num_tar, num_jsonl = 0, 0 for num_tar, (path, file) in enumerate(files_iterable, start=1): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): _test_jsonl(subpath, subfile) assert num_tar == 1 assert num_jsonl == 2 def test_iter_files(data_dir_with_hidden_files): dl_manager = StreamingDownloadManager() for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1): assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2 def test_xnumpy_load(tmp_path): import numpy as np expected_x = np.arange(10) npy_path = tmp_path / "data-x.npy" np.save(npy_path, expected_x) x = xnumpy_load(npy_path) assert np.array_equal(x, expected_x) npz_path = tmp_path / "data.npz" np.savez(npz_path, x=expected_x) with xnumpy_load(npz_path) as f: x = f["x"] assert np.array_equal(x, expected_x)
datasets/tests/test_streaming_download_manager.py/0
{ "file_path": "datasets/tests/test_streaming_download_manager.py", "repo_id": "datasets", "token_count": 17220 }
76
# Setup [[setup]]

After all this information, it's time to get started. We're going to do two things:

1. **Create your Hugging Face account** if it's not already done
2. **Sign up to Discord and introduce yourself** (don't be shy 🤗)

### Let's create your Hugging Face account

If you haven't already, create a Hugging Face account <a href="https://huggingface.co/join">here</a>.

### Let's join our Discord server

You can now sign up for our Discord server. This is the place where you **can chat with the community and with us, create and join study groups to grow with each other, and more** 👉🏻 Join our Discord server <a href="https://discord.gg/ydHrjt3WP5">here.</a>

When you join, remember to introduce yourself in #introduce-yourself and sign up for the reinforcement learning channels in #channels-and-roles.

We have multiple RL-related channels:
- `rl-announcements`: where we give the latest information about the course.
- `rl-discussions`: where you can chat about RL and share information.
- `rl-study-group`: where you can create and join study groups.
- `rl-i-made-this`: where you can share your projects and models.

If this is your first time using Discord, we wrote a Discord 101 guide covering the best practices. Check out the next section.

Congratulations! **You've just finished the onboarding**. You're now ready to start learning Deep Reinforcement Learning. Have fun!

### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit0/setup.mdx/0
{ "file_path": "deep-rl-class/units/en/unit0/setup.mdx", "repo_id": "deep-rl-class", "token_count": 389 }
77
# Conclusion [[conclusion]]

Congrats on finishing this chapter! There was a lot of information. And congrats on finishing the tutorials. You've just implemented your first RL agent from scratch and shared it on the Hub 🥳.

Implementing from scratch when you study a new architecture **is important to understand how it works.**

It's **normal if you still feel confused** by all these elements. **This was the same for me and for everyone who studies RL.**

Take time to really grasp the material before continuing.

In the next chapter, we're going to dive deeper by studying our first Deep Reinforcement Learning algorithm based on Q-Learning: Deep Q-Learning. And you'll train a **DQN agent with <a href="https://github.com/DLR-RM/rl-baselines3-zoo">RL-Baselines3 Zoo</a> to play Atari games**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Atari environments"/>

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit2/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 337 }
78
# The Deep Q-Network (DQN) [[deep-q-network]]

This is the architecture of our Deep Q-Learning network:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/deep-q-network.jpg" alt="Deep Q Network"/>

As input, we take a **stack of 4 frames** passed through the network as a state and output a **vector of Q-values for each possible action at that state**. Then, like with Q-Learning, we just need to use our epsilon-greedy policy to select which action to take.

When the neural network is initialized, **the Q-value estimation is terrible**. But during training, our Deep Q-Network agent will associate a situation with the appropriate action and **learn to play the game well**.

## Preprocessing the input and temporal limitation [[preprocessing]]

We need to **preprocess the input**. It's an essential step since we want to **reduce the complexity of our state to reduce the computation time needed for training**.

To achieve this, we **reduce the state space to 84x84 and grayscale it**. We can do this since the colors in Atari environments don't add important information. This is a big improvement since we **reduce our three color channels (RGB) to 1**.

We can also **crop a part of the screen in some games** if it does not contain important information. Then we stack four frames together.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/preprocessing.jpg" alt="Preprocessing"/>

**Why do we stack four frames together?** We stack frames together because it helps us **handle the problem of temporal limitation**. Let's take an example with the game of Pong. When you see this frame:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation.jpg" alt="Temporal Limitation"/>

Can you tell me where the ball is going? No, because one frame is not enough to have a sense of motion! But what if I add three more frames? **Here you can see that the ball is going to the right**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/temporal-limitation-2.jpg" alt="Temporal Limitation"/>

That's why, to capture temporal information, we stack four frames together.

Then the stacked frames are processed by three convolutional layers. These layers **allow us to capture and exploit spatial relationships in images**. But also, because the frames are stacked together, **we can exploit some temporal properties across those frames**. If you don't know what convolutional layers are, don't worry. You can check out [Lesson 4 of this free Deep Learning Course by Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188).

Finally, we have a couple of fully connected layers that output a Q-value for each possible action at that state.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/deep-q-network.jpg" alt="Deep Q Network"/>

So, we see that Deep Q-Learning uses a neural network to approximate, given a state, the different Q-values for each possible action at that state. Now let's study the Deep Q-Learning algorithm.
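To make the preprocessing step above concrete, here is a minimal sketch of grayscaling, resizing to 84x84, and stacking the last four frames with NumPy and Pillow. This is an illustration rather than the exact course implementation (in practice, ready-made wrappers from RL libraries handle this), and the function and class names are made up for the example:

```py
from collections import deque

import numpy as np
from PIL import Image


def preprocess_frame(frame):
    # grayscale the RGB frame and resize it to 84x84, dropping the color channels
    image = Image.fromarray(frame).convert("L").resize((84, 84))
    return np.asarray(image, dtype=np.uint8)


class FrameStack:
    """Keep the four most recent preprocessed frames as one (4, 84, 84) state."""

    def __init__(self, num_frames=4):
        self.frames = deque(maxlen=num_frames)

    def reset(self, first_frame):
        # at the start of an episode, repeat the first frame to fill the stack
        processed = preprocess_frame(first_frame)
        for _ in range(self.frames.maxlen):
            self.frames.append(processed)
        return np.stack(list(self.frames))

    def step(self, frame):
        # push the newest frame; the oldest one is dropped automatically
        self.frames.append(preprocess_frame(frame))
        return np.stack(list(self.frames))
```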
deep-rl-class/units/en/unit3/deep-q-network.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/deep-q-network.mdx", "repo_id": "deep-rl-class", "token_count": 888 }
79
# Bonus: Learn to create your own environments with Unity and MLAgents

**You can create your own reinforcement learning environments with Unity and MLAgents**.

Using a game engine such as Unity can be intimidating at first, but here are the steps you can take to learn smoothly.

## Step 1: Know how to use Unity

- The best way to learn Unity is to do the ["Create with Code" course](https://learn.unity.com/course/create-with-code): it's a series of videos for beginners where **you will create 5 small games with Unity**.

## Step 2: Create the simplest environment with this tutorial

- Then, when you know how to use Unity, you can create your [first basic RL environment using this tutorial](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Learning-Environment-Create-New.md).

## Step 3: Iterate and create nice environments

- Now that you've created your first simple environment, you can iterate towards more complex ones using the [MLAgents documentation (especially the Designing Agents and Agent parts)](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/)

- In addition, you can take the free course ["Create a hummingbird environment"](https://learn.unity.com/course/ml-agents-hummingbirds) by [Adam Kelly](https://twitter.com/aktwelve)

Have fun! And if you create custom environments, don't hesitate to share them in the `#rl-i-made-this` Discord channel.
deep-rl-class/units/en/unit5/bonus.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/bonus.mdx", "repo_id": "deep-rl-class", "token_count": 360 }
80
# Additional Readings [[additional-readings]]

## An introduction to multi-agents

- [Multi-agent reinforcement learning: An overview](https://www.dcsc.tudelft.nl/~bdeschutter/pub/rep/10_003.pdf)
- [Multiagent Reinforcement Learning, Marc Lanctot](https://rlss.inria.fr/files/2019/07/RLSS_Multiagent.pdf)
- [Example of a multi-agent environment](https://www.mathworks.com/help/reinforcement-learning/ug/train-3-agents-for-area-coverage.html?s_eid=PSM_15028)
- [A list of different multi-agent environments](https://agents.inf.ed.ac.uk/blog/multiagent-learning-environments/)
- [Multi-Agent Reinforcement Learning: Independent vs. Cooperative Agents](https://bit.ly/3nVK7My)
- [Dealing with Non-Stationarity in Multi-Agent Deep Reinforcement Learning](https://bit.ly/3v7LxaT)

## Self-Play and MA-POCA

- [Self Play Theory and with MLAgents](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents)
- [Training complex behavior with MLAgents](https://blog.unity.com/technology/ml-agents-v20-release-now-supports-training-complex-cooperative-behaviors)
- [MLAgents plays dodgeball](https://blog.unity.com/technology/ml-agents-plays-dodgeball)
- [On the Use and Misuse of Absorbing States in Multi-agent Reinforcement Learning (MA-POCA)](https://arxiv.org/pdf/2111.05992.pdf)
deep-rl-class/units/en/unit7/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 432 }
81
# The intuition behind PPO [[the-intuition-behind-ppo]]

The idea with Proximal Policy Optimization (PPO) is that we want to improve the training stability of the policy by limiting the change we make to the policy at each training epoch: **we want to avoid having too large of a policy update.**

For two reasons:
- We know empirically that smaller policy updates during training are **more likely to converge to an optimal solution.**
- Too big a step in a policy update can result in falling "off the cliff" (getting a bad policy) **and taking a long time, or even having no way, to recover.**

<figure class="image table text-center m-0 w-full">
  <img class="center" src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/cliff.jpg" alt="Policy Update cliff"/>
  <figcaption>Taking smaller policy updates to improve the training stability</figcaption>
  <figcaption>Modified version from RL — Proximal Policy Optimization (PPO) <a href="https://jonathan-hui.medium.com/rl-proximal-policy-optimization-ppo-explained-77f014ec3f12">Explained by Jonathan Hui</a></figcaption>
</figure>

**So with PPO, we update the policy conservatively**. To do so, we need to measure how much the current policy changed compared to the former one, using a ratio calculation between the current and former policy. And we clip this ratio to the range \\( [1 - \epsilon, 1 + \epsilon] \\), meaning that we **remove the incentive for the current policy to go too far from the old one (hence the term proximal policy).**
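To make the clipping concrete, here is a minimal PyTorch sketch of the clipped surrogate objective. It is an illustration only: the function name and the epsilon value are ours, and a complete PPO loss also adds value-function and entropy terms.

```py
import torch


def clipped_surrogate_loss(log_probs, old_log_probs, advantages, epsilon=0.2):
    # ratio between the current and former policy: pi_theta(a|s) / pi_theta_old(a|s)
    ratio = torch.exp(log_probs - old_log_probs)
    unclipped = ratio * advantages
    # clip the ratio to [1 - epsilon, 1 + epsilon] to keep the update proximal
    clipped = torch.clamp(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantages
    # take the pessimistic minimum and negate it, since optimizers minimize
    return -torch.min(unclipped, clipped).mean()
```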
deep-rl-class/units/en/unit8/intuition-behind-ppo.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/intuition-behind-ppo.mdx", "repo_id": "deep-rl-class", "token_count": 426 }
82
# Language models in RL

## LMs encode useful knowledge for agents

**Language models** (LMs) can exhibit impressive abilities when manipulating text, such as question-answering or even step-by-step reasoning. Additionally, their training on massive text corpora allowed them to **encode various types of knowledge, including abstract knowledge about the physical rules of our world** (for instance, what is possible to do with an object, or what happens when one rotates an object…).

A natural question recently studied was whether such knowledge could benefit agents such as robots when trying to solve everyday tasks. And while these works showed interesting results, the proposed agents lacked any learning method. **This limitation prevents these agents from adapting to the environment (e.g. fixing wrong knowledge) or learning new skills.**

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/language.png" alt="Language">
<figcaption>Source: <a href="https://ai.googleblog.com/2022/08/towards-helpful-robots-grounding.html">Towards Helpful Robots: Grounding Language in Robotic Affordances</a></figcaption>
</figure>

## LMs and RL

There is therefore a potential synergy between LMs, which can bring knowledge about the world, and RL, which can align and correct this knowledge by interacting with an environment. It is especially interesting from an RL point of view, as the RL field mostly relies on the **tabula rasa** setup where everything is learned from scratch by the agent, leading to:

1) Sample inefficiency

2) Behaviors that look unexpected to humans

As a first attempt, the paper ["Grounding Large Language Models with Online Reinforcement Learning"](https://arxiv.org/abs/2302.02662v1) tackled the problem of **adapting or aligning an LM to a textual environment using PPO**. They showed that the knowledge encoded in the LM led to fast adaptation to the environment (opening avenues for sample-efficient RL agents), and also that such knowledge allowed the LM to better generalize to new tasks once aligned.

<video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/papier_v4.mp4" type="video/mp4" controls />

Another direction studied in ["Guiding Pretraining in Reinforcement Learning with Large Language Models"](https://arxiv.org/abs/2302.06692) was to keep the LM frozen but leverage its knowledge to **guide an RL agent's exploration**. Such a method allows the RL agent to be guided towards human-meaningful and plausibly useful behaviors without requiring a human in the loop during training.

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/language2.png" alt="Language">
<figcaption>Source: <a href="https://ai.googleblog.com/2022/08/towards-helpful-robots-grounding.html">Towards Helpful Robots: Grounding Language in Robotic Affordances</a></figcaption>
</figure>

Several limitations make these works still very preliminary, such as the need to convert the agent's observations to text before giving them to an LM, as well as the compute cost of interacting with very large LMs.

## Further reading

For more information, we recommend you check out the following resources:

- [Google Research, 2022 & beyond: Robotics](https://ai.googleblog.com/2023/02/google-research-2022-beyond-robotics.html)
- [Pre-Trained Language Models for Interactive Decision-Making](https://arxiv.org/abs/2202.01771)
- [Grounding Large Language Models with Online Reinforcement Learning](https://arxiv.org/abs/2302.02662v1)
- [Guiding Pretraining in Reinforcement Learning with Large Language Models](https://arxiv.org/abs/2302.06692)

## Author

This section was written by <a href="https://twitter.com/ClementRomac">Clément Romac</a>
deep-rl-class/units/en/unitbonus3/language-models.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/language-models.mdx", "repo_id": "deep-rl-class", "token_count": 1011 }
83
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples

# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src

check_dirs := examples scripts src tests utils benchmarks

modified_only_fixup:
	$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
	@if test -n "$(modified_py_files)"; then \
		echo "Checking/fixing $(modified_py_files)"; \
		ruff check $(modified_py_files) --fix; \
		ruff format $(modified_py_files);\
	else \
		echo "No library .py files were modified"; \
	fi

# Update src/diffusers/dependency_versions_table.py
deps_table_update:
	@python setup.py deps_table_update

deps_table_check_updated:
	@md5sum src/diffusers/dependency_versions_table.py > md5sum.saved
	@python setup.py deps_table_update
	@md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1)
	@rm md5sum.saved

# autogenerating code
autogenerate_code: deps_table_update

# Check that the repo is in a good state
repo-consistency:
	python utils/check_dummies.py
	python utils/check_repo.py
	python utils/check_inits.py

# this target runs checks on all files
quality:
	ruff check $(check_dirs) setup.py
	ruff format --check $(check_dirs) setup.py
	python utils/check_doc_toc.py

# Format source code automatically and check if there are any problems left that need manual fixing
extra_style_checks:
	python utils/custom_init_isort.py
	python utils/check_doc_toc.py --fix_and_overwrite

# this target runs checks on all files and potentially modifies some of them
style:
	ruff check $(check_dirs) setup.py --fix
	ruff format $(check_dirs) setup.py
	${MAKE} autogenerate_code
	${MAKE} extra_style_checks

# Super fast fix and check target that only works on relevant modified files since the branch was made
fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency

# Make marked copies of snippets of code conform to the original
fix-copies:
	python utils/check_copies.py --fix_and_overwrite
	python utils/check_dummies.py --fix_and_overwrite

# Run tests for the library
test:
	python -m pytest -n auto --dist=loadfile -s -v ./tests/

# Run tests for examples
test-examples:
	python -m pytest -n auto --dist=loadfile -s -v ./examples/

# Release stuff
pre-release:
	python utils/release.py

pre-patch:
	python utils/release.py --patch

post-release:
	python utils/release.py --post_release

post-patch:
	python utils/release.py --post_release --patch
diffusers/Makefile/0
{ "file_path": "diffusers/Makefile", "repo_id": "diffusers", "token_count": 889 }
84
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        torch==2.1.2 \
        torchvision==0.16.2 \
        torchaudio==2.1.2 \
        onnxruntime \
        --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3 -m pip install --no-cache-dir \
        accelerate \
        datasets \
        hf-doc-builder \
        huggingface-hub \
        Jinja2 \
        librosa \
        numpy \
        scipy \
        tensorboard \
        transformers

CMD ["/bin/bash"]
diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile/0
{ "file_path": "diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile", "repo_id": "diffusers", "token_count": 630 }
85
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# LoRA

LoRA is a fast and lightweight training method that inserts and trains a significantly smaller number of parameters instead of all the model parameters. This produces a smaller file (~100 MBs) and makes it easier to quickly train a model to learn a new concept. LoRA weights are typically loaded into the UNet, text encoder or both. There are two classes for loading LoRA weights:

- [`LoraLoaderMixin`] provides functions for loading and unloading, fusing and unfusing, enabling and disabling, and more functions for managing LoRA weights. This class can be used with any model.
- [`StableDiffusionXLLoraLoaderMixin`] is a [Stable Diffusion (SDXL)](../../api/pipelines/stable_diffusion/stable_diffusion_xl) version of the [`LoraLoaderMixin`] class for loading and saving LoRA weights. It can only be used with the SDXL model.

<Tip>

To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.

</Tip>

## LoraLoaderMixin

[[autodoc]] loaders.lora.LoraLoaderMixin

## StableDiffusionXLLoraLoaderMixin

[[autodoc]] loaders.lora.StableDiffusionXLLoraLoaderMixin
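For orientation, loading LoRA weights into a pipeline with the mixin's `load_lora_weights` method typically looks like the sketch below. The repository id and weight filename are placeholders, not real checkpoints:

```py
import torch
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# load LoRA weights into the UNet (and the text encoder, if the file contains them)
pipeline.load_lora_weights("<your-username>/<your-lora>", weight_name="pytorch_lora_weights.safetensors")

image = pipeline("a prompt in the style the LoRA was trained on").images[0]
```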
diffusers/docs/source/en/api/loaders/lora.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/lora.md", "repo_id": "diffusers", "token_count": 464 }
86
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ControlNet

ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.

With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.

The abstract from the paper is:

*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*

This model was contributed by [takuma104](https://huggingface.co/takuma104). ❤️

The original codebase can be found at [lllyasviel/ControlNet](https://github.com/lllyasviel/ControlNet), and you can find official ControlNet checkpoints on [lllyasviel's](https://huggingface.co/lllyasviel) Hub profile.

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## StableDiffusionControlNetPipeline
[[autodoc]] StableDiffusionControlNetPipeline
	- all
	- __call__
	- enable_attention_slicing
	- disable_attention_slicing
	- enable_vae_slicing
	- disable_vae_slicing
	- enable_xformers_memory_efficient_attention
	- disable_xformers_memory_efficient_attention
	- load_textual_inversion

## StableDiffusionControlNetImg2ImgPipeline
[[autodoc]] StableDiffusionControlNetImg2ImgPipeline
	- all
	- __call__
	- enable_attention_slicing
	- disable_attention_slicing
	- enable_vae_slicing
	- disable_vae_slicing
	- enable_xformers_memory_efficient_attention
	- disable_xformers_memory_efficient_attention
	- load_textual_inversion

## StableDiffusionControlNetInpaintPipeline
[[autodoc]] StableDiffusionControlNetInpaintPipeline
	- all
	- __call__
	- enable_attention_slicing
	- disable_attention_slicing
	- enable_vae_slicing
	- disable_vae_slicing
	- enable_xformers_memory_efficient_attention
	- disable_xformers_memory_efficient_attention
	- load_textual_inversion

## StableDiffusionPipelineOutput
[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput

## FlaxStableDiffusionControlNetPipeline
[[autodoc]] FlaxStableDiffusionControlNetPipeline
	- all
	- __call__

## FlaxStableDiffusionControlNetPipelineOutput
[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput
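For orientation, a minimal text-to-image call with a canny-edge ControlNet might look like the sketch below. The control image URL is a placeholder for a precomputed canny edge map; the checkpoint names are the commonly used public ones:

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# the control image here should already be a canny edge map
canny_image = load_image("https://path/to/your/canny_edge_map.png")
image = pipeline("a futuristic city at night", image=canny_image, num_inference_steps=20).images[0]
```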
diffusers/docs/source/en/api/pipelines/controlnet.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/controlnet.md", "repo_id": "diffusers", "token_count": 1147 }
87
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Paint by Example

[Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://huggingface.co/papers/2211.13227) is by Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, Fang Wen.

The abstract from the paper is:

*Language-guided image editing has achieved great success recently. In this paper, for the first time, we investigate exemplar-guided image editing for more precise control. We achieve this goal by leveraging self-supervised training to disentangle and re-organize the source image and the exemplar. However, the naive approach will cause obvious fusing artifacts. We carefully analyze it and propose an information bottleneck and strong augmentations to avoid the trivial solution of directly copying and pasting the exemplar image. Meanwhile, to ensure the controllability of the editing process, we design an arbitrary shape mask for the exemplar image and leverage the classifier-free guidance to increase the similarity to the exemplar image. The whole framework involves a single forward of the diffusion model without any iterative optimization. We demonstrate that our method achieves an impressive performance and enables controllable editing on in-the-wild images with high fidelity.*

The original codebase can be found at [Fantasy-Studio/Paint-by-Example](https://github.com/Fantasy-Studio/Paint-by-Example), and you can try it out in a [demo](https://huggingface.co/spaces/Fantasy-Studio/Paint-by-Example).

## Tips

Paint by Example is supported by the official [Fantasy-Studio/Paint-by-Example](https://huggingface.co/Fantasy-Studio/Paint-by-Example) checkpoint. The checkpoint is warm-started from [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to inpaint partly masked images conditioned on example and reference images.

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## PaintByExamplePipeline
[[autodoc]] PaintByExamplePipeline
	- all
	- __call__

## StableDiffusionPipelineOutput
[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
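As a quick orientation, a call to the pipeline usually follows the pattern sketched below; the image URLs are placeholders you would replace with your own init image, mask, and exemplar:

```py
import torch
from diffusers import PaintByExamplePipeline
from diffusers.utils import load_image

pipeline = PaintByExamplePipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://path/to/your/image.png")  # image to edit
mask_image = load_image("https://path/to/your/mask.png")  # white pixels mark the region to repaint
example_image = load_image("https://path/to/your/example.png")  # reference exemplar image

image = pipeline(image=init_image, mask_image=mask_image, example_image=example_image).images[0]
```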
diffusers/docs/source/en/api/pipelines/paint_by_example.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/paint_by_example.md", "repo_id": "diffusers", "token_count": 763 }
88
<!--Copyright 2023 The Intel Labs Team Authors and HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-to-(RGB, depth)

LDM3D was proposed in [LDM3D: Latent Diffusion Model for 3D](https://huggingface.co/papers/2305.10853) by Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, and Vasudev Lal. LDM3D generates an image and a depth map from a given text prompt, unlike existing text-to-image diffusion models such as [Stable Diffusion](./overview), which only generate an image. With almost the same number of parameters, LDM3D manages to create a latent space that can compress both the RGB images and the depth maps.

Two checkpoints are available for use:
- [ldm3d-original](https://huggingface.co/Intel/ldm3d). The original checkpoint used in the [paper](https://arxiv.org/pdf/2305.10853.pdf)
- [ldm3d-4c](https://huggingface.co/Intel/ldm3d-4c). The new version of LDM3D using 4-channel inputs instead of 6-channel inputs, and finetuned on higher-resolution images.

The abstract from the paper is:

*This research paper proposes a Latent Diffusion Model for 3D (LDM3D) that generates both image and depth map data from a given text prompt, allowing users to generate RGBD images from text prompts. The LDM3D model is fine-tuned on a dataset of tuples containing an RGB image, depth map and caption, and validated through extensive experiments. We also develop an application called DepthFusion, which uses the generated RGB images and depth maps to create immersive and interactive 360-degree-view experiences using TouchDesigner. This technology has the potential to transform a wide range of industries, from entertainment and gaming to architecture and design. Overall, this paper presents a significant contribution to the field of generative AI and computer vision, and showcases the potential of LDM3D and DepthFusion to revolutionize content creation and digital experiences. A short video summarizing the approach can be found at [this url](https://t.ly/tdi2).*

<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

</Tip>

## StableDiffusionLDM3DPipeline

[[autodoc]] pipelines.stable_diffusion_ldm3d.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline
	- all
	- __call__

## LDM3DPipelineOutput

[[autodoc]] pipelines.stable_diffusion_ldm3d.pipeline_stable_diffusion_ldm3d.LDM3DPipelineOutput
	- all
	- __call__

# Upscaler

[LDM3D-VR](https://arxiv.org/pdf/2311.03226.pdf) is an extended version of LDM3D.

The abstract from the paper is:

*Latent diffusion models have proven to be state-of-the-art in the creation and manipulation of visual outputs. However, as far as we know, the generation of depth maps jointly with RGB is still limited. We introduce LDM3D-VR, a suite of diffusion models targeting virtual reality development that includes LDM3D-pano and LDM3D-SR. These models enable the generation of panoramic RGBD based on textual prompts and the upscaling of low-resolution inputs to high-resolution RGBD, respectively. Our models are fine-tuned from existing pretrained models on datasets containing panoramic/high-resolution RGB images, depth maps and captions. Both models are evaluated in comparison to existing related methods*

Two checkpoints are available for use:
- [ldm3d-pano](https://huggingface.co/Intel/ldm3d-pano). This checkpoint enables the generation of panoramic images and requires the StableDiffusionLDM3DPipeline pipeline to be used.
- [ldm3d-sr](https://huggingface.co/Intel/ldm3d-sr). This checkpoint enables the upscaling of RGB and depth images. It can be used in cascade after the original LDM3D pipeline with the `StableDiffusionUpscaleLDM3DPipeline` community pipeline.
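For orientation, generating an RGB image together with its depth map from the 4-channel checkpoint might look like the sketch below (the prompt and output filenames are illustrative):

```py
import torch
from diffusers import StableDiffusionLDM3DPipeline

pipeline = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c", torch_dtype=torch.float16).to("cuda")

output = pipeline("a photo of an old oak tree in autumn")
# the output carries paired RGB images and depth maps
rgb_image, depth_image = output.rgb[0], output.depth[0]
rgb_image.save("tree_rgb.jpg")
depth_image.save("tree_depth.png")
```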
diffusers/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md", "repo_id": "diffusers", "token_count": 1200 }
89
# ConsistencyDecoderScheduler

This scheduler is a part of the [`ConsistencyDecoderPipeline`] and was introduced in [DALL-E 3](https://openai.com/dall-e-3).

The original codebase can be found at [openai/consistency_models](https://github.com/openai/consistency_models).

## ConsistencyDecoderScheduler

[[autodoc]] schedulers.scheduling_consistency_decoder.ConsistencyDecoderScheduler
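For context, this scheduler is typically exercised indirectly through [`ConsistencyDecoderVAE`], which can replace the default VAE of a Stable Diffusion pipeline. A minimal sketch (the prompt is illustrative):

```py
import torch
from diffusers import ConsistencyDecoderVAE, DiffusionPipeline

vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
).to("cuda")

# decoding the denoised latents now goes through the consistency decoder
image = pipeline("horse", generator=torch.manual_seed(0)).images[0]
```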
diffusers/docs/source/en/api/schedulers/consistency_decoder.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/consistency_decoder.md", "repo_id": "diffusers", "token_count": 132 }
90
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

<p align="center">
    <br>
    <img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/>
    <br>
</p>

# Diffusers

🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or want to train your own diffusion model, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](conceptual/philosophy#usability-over-performance), [simple over easy](conceptual/philosophy#simple-over-easy), and [customizability over abstractions](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).

The library has three main components:

- State-of-the-art diffusion pipelines for inference with just a few lines of code. There are many pipelines in 🤗 Diffusers, check out the table in the pipeline [overview](api/pipelines/overview) for a complete list of available pipelines and the task they solve.
- Interchangeable [noise schedulers](api/schedulers/overview) for balancing trade-offs between generation speed and quality.
- Pretrained [models](api/models) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems.

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
      ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
      <p class="text-gray-700">Learn the fundamental skills you need to start generating outputs, build your own diffusion system, and train a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
      ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
      <p class="text-gray-700">Practical guides for helping you load pipelines, models, and schedulers. You'll also learn how to use pipelines for specific tasks, control how outputs are generated, optimize for inference speed, and different training techniques.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy"
      ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
      <p class="text-gray-700">Understand why the library was designed the way it was, and learn more about the ethical guidelines and safety implementations for using the library.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models/overview"
      ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
      <p class="text-gray-700">Technical descriptions of how 🤗 Diffusers classes and methods work.</p>
    </a>
  </div>
</div>
diffusers/docs/source/en/index.md/0
{ "file_path": "diffusers/docs/source/en/index.md", "repo_id": "diffusers", "token_count": 1317 }
91
# Adapt a model to a new task

Many diffusion systems share the same components, allowing you to adapt a pretrained model for one task to an entirely different task.

This guide will show you how to adapt a pretrained text-to-image model for inpainting by initializing and modifying the architecture of a pretrained [`UNet2DConditionModel`].

## Configure UNet2DConditionModel parameters

A [`UNet2DConditionModel`] by default accepts 4 channels in the [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels). For example, load a pretrained text-to-image model like [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and take a look at the number of `in_channels`:

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True)
pipeline.unet.config["in_channels"]
4
```

Inpainting requires 9 channels in the input sample. You can check this value in a pretrained inpainting model like [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting):

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", use_safetensors=True)
pipeline.unet.config["in_channels"]
9
```

To adapt your text-to-image model for inpainting, you'll need to change the number of `in_channels` from 4 to 9.

Initialize a [`UNet2DConditionModel`] with the pretrained text-to-image model weights, and change `in_channels` to 9. Changing the number of `in_channels` means you need to set `ignore_mismatched_sizes=True` and `low_cpu_mem_usage=False` to avoid a size mismatch error because the shape is different now.

```py
from diffusers import UNet2DConditionModel

model_id = "runwayml/stable-diffusion-v1-5"
unet = UNet2DConditionModel.from_pretrained(
    model_id,
    subfolder="unet",
    in_channels=9,
    low_cpu_mem_usage=False,
    ignore_mismatched_sizes=True,
    use_safetensors=True,
)
```

The pretrained weights of the other components from the text-to-image model are initialized from their checkpoints, but the input channel weights (`conv_in.weight`) of the `unet` are randomly initialized. It is important to finetune the model for inpainting because otherwise the model returns noise.
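As a quick sanity check on the adapted model (an illustrative snippet assuming the `unet` initialized above), you can confirm that the input convolution now expects 9 channels:

```py
# for the Stable Diffusion v1-5 UNet this prints torch.Size([320, 9, 3, 3])
print(unet.conv_in.weight.shape)
```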
diffusers/docs/source/en/training/adapt_a_model.md/0
{ "file_path": "diffusers/docs/source/en/training/adapt_a_model.md", "repo_id": "diffusers", "token_count": 779 }
92
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Unconditional image generation

Unconditional image generation models are not conditioned on text or images during training. They only generate images that resemble their training data distribution.

This guide will explore the [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py) training script to help you become familiar with it, and how you can adapt it for your own use-case.

Before running the script, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the example folder containing the training script and install the required dependencies:

```bash
cd examples/unconditional_image_generation
pip install -r requirements.txt
```

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

## Script parameters

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but they don't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py) and let us know if you have any questions or concerns.

</Tip>

The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L55) function. It provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like.

For example, to speed up training with mixed precision using the bf16 format, add the `--mixed_precision` parameter to the training command:

```bash
accelerate launch train_unconditional.py \
  --mixed_precision="bf16"
```

Some basic and important parameters to specify include:

- `--dataset_name`: the name of the dataset on the Hub or a local path to the dataset to train on
- `--output_dir`: where to save the trained model
- `--push_to_hub`: whether to push the trained model to the Hub
- `--checkpointing_steps`: frequency of saving a checkpoint as the model trains; this is useful because if training is interrupted, you can continue training from that checkpoint by adding `--resume_from_checkpoint` to your training command

Bring your dataset, and let the training script handle everything else!

## Training script

The code for preprocessing the dataset and the training loop is found in the [`main()`](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L275) function. If you need to adapt the training script, this is where you'll need to make your changes.

The `train_unconditional` script [initializes a `UNet2DModel`](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L356) if you don't provide a model configuration. You can configure the UNet here if you'd like:

```py
model = UNet2DModel(
    sample_size=args.resolution,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(128, 128, 256, 256, 512, 512),
    down_block_types=(
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",
        "DownBlock2D",
    ),
    up_block_types=(
        "UpBlock2D",
        "AttnUpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
    ),
)
```

Next, the script initializes a [scheduler](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L418) and [optimizer](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L429):

```py
# Initialize the scheduler
accepts_prediction_type = "prediction_type" in set(inspect.signature(DDPMScheduler.__init__).parameters.keys())
if accepts_prediction_type:
    noise_scheduler = DDPMScheduler(
        num_train_timesteps=args.ddpm_num_steps,
        beta_schedule=args.ddpm_beta_schedule,
        prediction_type=args.prediction_type,
    )
else:
    noise_scheduler = DDPMScheduler(num_train_timesteps=args.ddpm_num_steps, beta_schedule=args.ddpm_beta_schedule)

# Initialize the optimizer
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=args.learning_rate,
    betas=(args.adam_beta1, args.adam_beta2),
    weight_decay=args.adam_weight_decay,
    eps=args.adam_epsilon,
)
```

Then it [loads a dataset](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L451) and you can specify how to [preprocess](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L455) it:

```py
dataset = load_dataset("imagefolder", data_dir=args.train_data_dir, cache_dir=args.cache_dir, split="train")

augmentations = transforms.Compose(
    [
        transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
        transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
        transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
```

Finally, the [training loop](https://github.com/huggingface/diffusers/blob/096f84b05f9514fae9f185cbec0a4d38fbad9919/examples/unconditional_image_generation/train_unconditional.py#L540) handles everything else such as adding noise to the images, predicting the noise residual, calculating the loss, saving checkpoints at specified steps, and saving and pushing the model to the Hub. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

## Launch the script

Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀

<Tip warning={true}>

A full training run takes 2 hours on 4xV100 GPUs.

</Tip>

<hfoptions id="launchtraining">
<hfoption id="single GPU">

```bash
accelerate launch train_unconditional.py \
  --dataset_name="huggan/flowers-102-categories" \
  --output_dir="ddpm-ema-flowers-64" \
  --mixed_precision="fp16" \
  --push_to_hub
```

</hfoption>
<hfoption id="multi-GPU">

If you're training with more than one GPU, add the `--multi_gpu` parameter to the training command:

```bash
accelerate launch --multi_gpu train_unconditional.py \
  --dataset_name="huggan/flowers-102-categories" \
  --output_dir="ddpm-ema-flowers-64" \
  --mixed_precision="fp16" \
  --push_to_hub
```

</hfoption>
</hfoptions>

The training script creates and saves a checkpoint file in your repository. Now you can load and use your trained model for inference:

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128").to("cuda")
image = pipeline().images[0]
```
diffusers/docs/source/en/training/unconditional_training.md/0
{ "file_path": "diffusers/docs/source/en/training/unconditional_training.md", "repo_id": "diffusers", "token_count": 2950 }
93
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DiffEdit [[open-in-colab]] Image editing typically requires providing a mask of the area to be edited. DiffEdit automatically generates the mask for you based on a text query, making it easier overall to create a mask without image editing software. The DiffEdit algorithm works in three steps: 1. the diffusion model denoises an image conditioned on some query text and reference text which produces different noise estimates for different areas of the image; the difference is used to infer a mask to identify which area of the image needs to be changed to match the query text 2. the input image is encoded into latent space with DDIM 3. the latents are decoded with the diffusion model conditioned on the text query, using the mask as a guide such that pixels outside the mask remain the same as in the input image This guide will show you how to use DiffEdit to edit images without manually creating a mask. Before you begin, make sure you have the following libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install -q diffusers transformers accelerate ``` The [`StableDiffusionDiffEditPipeline`] requires an image mask and a set of partially inverted latents. The image mask is generated from the [`~StableDiffusionDiffEditPipeline.generate_mask`] function, and includes two parameters, `source_prompt` and `target_prompt`. These parameters determine what to edit in the image. For example, if you want to change a bowl of *fruits* to a bowl of *pears*, then: ```py source_prompt = "a bowl of fruits" target_prompt = "a bowl of pears" ``` The partially inverted latents are generated from the [`~StableDiffusionDiffEditPipeline.invert`] function, and it is generally a good idea to include a `prompt` or *caption* describing the image to help guide the inverse latent sampling process. The caption can often be your `source_prompt`, but feel free to experiment with other text descriptions! Let's load the pipeline, scheduler, inverse scheduler, and enable some optimizations to reduce memory usage: ```py import torch from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionDiffEditPipeline pipeline = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None, use_safetensors=True, ) pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) pipeline.enable_model_cpu_offload() pipeline.enable_vae_slicing() ``` Load the image to edit: ```py from diffusers.utils import load_image, make_image_grid img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" raw_image = load_image(img_url).resize((768, 768)) raw_image ``` Use the [`~StableDiffusionDiffEditPipeline.generate_mask`] function to generate the image mask. 
You'll need to pass it the `source_prompt` and `target_prompt` to specify what to edit in the image:

```py
from PIL import Image

source_prompt = "a bowl of fruits"
target_prompt = "a basket of pears"
mask_image = pipeline.generate_mask(
    image=raw_image,
    source_prompt=source_prompt,
    target_prompt=target_prompt,
)
Image.fromarray((mask_image.squeeze() * 255).astype("uint8"), "L").resize((768, 768))
```

Next, create the inverted latents and pass the function a caption describing the image:

```py
inv_latents = pipeline.invert(prompt=source_prompt, image=raw_image).latents
```

Finally, pass the image mask and inverted latents to the pipeline. The `target_prompt` becomes the `prompt` now, and the `source_prompt` is used as the `negative_prompt`:

```py
output_image = pipeline(
    prompt=target_prompt,
    mask_image=mask_image,
    image_latents=inv_latents,
    negative_prompt=source_prompt,
).images[0]
mask_image = Image.fromarray((mask_image.squeeze() * 255).astype("uint8"), "L").resize((768, 768))
make_image_grid([raw_image, mask_image, output_image], rows=1, cols=3)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/assets/target.png?raw=true"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">edited image</figcaption>
  </div>
</div>

## Generate source and target embeddings

The source and target embeddings can be automatically generated with the [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) model instead of creating them manually.

Load the Flan-T5 model and tokenizer from the 🤗 Transformers library:

```py
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16)
```

Provide some initial text to prompt the model to generate the source and target prompts (the two string literals are wrapped in parentheses so they concatenate into a single prompt):

```py
source_concept = "bowl"
target_concept = "basket"

source_text = (
    f"Provide a caption for images containing a {source_concept}. "
    "The captions should be in English and should be no longer than 150 characters."
)

target_text = (
    f"Provide a caption for images containing a {target_concept}. "
    "The captions should be in English and should be no longer than 150 characters."
)
```

Next, create a utility function to generate the prompts:

```py
@torch.no_grad()
def generate_prompts(input_prompt):
    input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda")

    outputs = model.generate(
        input_ids, temperature=0.8, num_return_sequences=16, do_sample=True, max_new_tokens=128, top_k=10
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)

source_prompts = generate_prompts(source_text)
target_prompts = generate_prompts(target_text)
print(source_prompts)
print(target_prompts)
```

<Tip>

Check out the [generation strategy](https://huggingface.co/docs/transformers/main/en/generation_strategies) guide if you're interested in learning more about strategies for generating different quality text.

</Tip>

Load the text encoder model used by the [`StableDiffusionDiffEditPipeline`] to encode the text.
You'll use the text encoder to compute the text embeddings:

```py
import torch
from diffusers import StableDiffusionDiffEditPipeline

pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, use_safetensors=True
)
pipeline.enable_model_cpu_offload()
pipeline.enable_vae_slicing()

@torch.no_grad()
def embed_prompts(sentences, tokenizer, text_encoder, device="cuda"):
    embeddings = []
    for sent in sentences:
        text_inputs = tokenizer(
            sent,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=None)[0]
        embeddings.append(prompt_embeds)
    return torch.concatenate(embeddings, dim=0).mean(dim=0).unsqueeze(0)

source_embeds = embed_prompts(source_prompts, pipeline.tokenizer, pipeline.text_encoder)
target_embeds = embed_prompts(target_prompts, pipeline.tokenizer, pipeline.text_encoder)
```

Finally, pass the embeddings to the [`~StableDiffusionDiffEditPipeline.generate_mask`] and [`~StableDiffusionDiffEditPipeline.invert`] functions, and to the pipeline to generate the image:

```diff
from diffusers import DDIMInverseScheduler, DDIMScheduler
from diffusers.utils import load_image, make_image_grid
from PIL import Image

pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)

img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
raw_image = load_image(img_url).resize((768, 768))

mask_image = pipeline.generate_mask(
    image=raw_image,
-     source_prompt=source_prompt,
-     target_prompt=target_prompt,
+     source_prompt_embeds=source_embeds,
+     target_prompt_embeds=target_embeds,
)

inv_latents = pipeline.invert(
-     prompt=source_prompt,
+     prompt_embeds=source_embeds,
    image=raw_image,
).latents

output_image = pipeline(
    mask_image=mask_image,
    image_latents=inv_latents,
-     prompt=target_prompt,
-     negative_prompt=source_prompt,
+     prompt_embeds=target_embeds,
+     negative_prompt_embeds=source_embeds,
).images[0]
mask_image = Image.fromarray((mask_image.squeeze() * 255).astype("uint8"), "L")
make_image_grid([raw_image, mask_image, output_image], rows=1, cols=3)
```

## Generate a caption for inversion

While you can use the `source_prompt` as a caption to help generate the partially inverted latents, you can also use the [BLIP](https://huggingface.co/docs/transformers/model_doc/blip) model to automatically generate a caption.
Load the BLIP model and processor from the 🤗 Transformers library:

```py
import torch
from transformers import BlipForConditionalGeneration, BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16, low_cpu_mem_usage=True)
```

Create a utility function to generate a caption from the input image:

```py
@torch.no_grad()
def generate_caption(images, caption_generator, caption_processor):
    text = "a photograph of"

    inputs = caption_processor(images, text, return_tensors="pt").to(device="cuda", dtype=caption_generator.dtype)
    caption_generator.to("cuda")
    outputs = caption_generator.generate(**inputs, max_new_tokens=128)

    # offload caption generator
    caption_generator.to("cpu")

    caption = caption_processor.batch_decode(outputs, skip_special_tokens=True)[0]
    return caption
```

Load an input image and generate a caption for it using the `generate_caption` function:

```py
from diffusers.utils import load_image

img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
raw_image = load_image(img_url).resize((768, 768))
caption = generate_caption(raw_image, model, processor)
```

<div class="flex justify-center">
  <figure>
    <img class="rounded-xl" src="https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"/>
    <figcaption class="text-center">generated caption: "a photograph of a bowl of fruit on a table"</figcaption>
  </figure>
</div>

Now you can drop the caption into the [`~StableDiffusionDiffEditPipeline.invert`] function to generate the partially inverted latents!
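
For instance, here is a minimal sketch (not part of the original guide) of what that last step could look like, assuming the `pipeline` with its inverse scheduler, `raw_image`, and `caption` from the sections above are still in scope:

```py
# a minimal sketch: reuse the BLIP caption as the inversion prompt
# (assumes `pipeline`, `raw_image`, and `caption` are defined as above)
inv_latents = pipeline.invert(prompt=caption, image=raw_image).latents
```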
diffusers/docs/source/en/using-diffusers/diffedit.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/diffedit.md", "repo_id": "diffusers", "token_count": 3848 }
94
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Improve image quality with deterministic generation

[[open-in-colab]]

A common way to improve the quality of generated images is with *deterministic batch generation*: generate a batch of images and select one image to improve with a more detailed prompt in a second round of inference. The key is to pass a list of [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator)s to the pipeline for batched image generation, and tie each `Generator` to a seed so you can reuse it for an image.

Let's use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) as an example, and generate several versions of the following prompt:

```py
prompt = "Labrador in the style of Vermeer"
```

Instantiate a pipeline with [`DiffusionPipeline.from_pretrained`] and place it on a GPU (if available):

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
)
pipe = pipe.to("cuda")
```

Now, define four different `Generator`s and assign each `Generator` a seed (`0` to `3`) so you can reuse a `Generator` later for a specific image:

```python
generator = [torch.Generator(device="cuda").manual_seed(i) for i in range(4)]
```

<Tip warning={true}>

To create a batched seed, you should use a list comprehension that iterates over the length specified in `range()`. This creates a unique `Generator` object for each image in the batch. If you instead multiply a single `Generator` by the batch size, only one `Generator` object is created and it is used sequentially for each image in the batch. For example, if you want to use the same seed to create 4 identical images:

```py
❌ [torch.Generator().manual_seed(seed)] * 4

✅ [torch.Generator().manual_seed(seed) for _ in range(4)]
```

</Tip>

Generate the images and have a look:

```python
images = pipe(prompt, generator=generator, num_images_per_prompt=4).images
make_image_grid(images, rows=2, cols=2)
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds.jpg)

In this example, you'll improve upon the first image - but in reality, you can use any image you want (even the image with double sets of eyes!). The first image used the `Generator` with seed `0`, so you'll reuse that `Generator` for the second round of inference. To improve the quality of the image, add some additional text to the prompt:

```python
prompt = [prompt + t for t in [", highly realistic", ", artsy", ", trending", ", colorful"]]
generator = [torch.Generator(device="cuda").manual_seed(0) for i in range(4)]
```

Create four generators with seed `0`, and generate another batch of images, all of which should look like the first image from the previous round!
```python
images = pipe(prompt, generator=generator).images
make_image_grid(images, rows=2, cols=2)
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds_2.jpg)
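
If you ever want to verify that the seeds are behaving deterministically, a quick sanity check (not part of the original guide) is to reseed two `Generator`s identically and compare the outputs pixel for pixel:

```python
import numpy as np

# two Generators seeded identically should produce identical images
gen_a = torch.Generator(device="cuda").manual_seed(0)
gen_b = torch.Generator(device="cuda").manual_seed(0)
img_a = pipe("Labrador in the style of Vermeer", generator=gen_a).images[0]
img_b = pipe("Labrador in the style of Vermeer", generator=gen_b).images[0]
assert np.array_equal(np.array(img_a), np.array(img_b))
```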
diffusers/docs/source/en/using-diffusers/reusing_seeds.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/reusing_seeds.md", "repo_id": "diffusers", "token_count": 1100 }
95
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Effective and efficient diffusion

[[open-in-colab]]

Getting the [`DiffusionPipeline`] to generate images in a certain style or include what you want can be tricky. Often, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something from nothing is a computationally intensive process, especially if you're running inference over and over again.

This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline to reduce the time between inference cycles so you can iterate faster.

This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`].

Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:

```python
from diffusers import DiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True)
```

The example prompt used here is a portrait photo of an old warrior chief, but feel free to use your own:

```python
prompt = "portrait photo of a old warrior chief"
```

## Speed

<Tip>

💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!

</Tip>

One of the simplest ways to speed up image generation is to place the pipeline on a GPU, the same way you would with any PyTorch module:

```python
pipeline = pipeline.to("cuda")
```

To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):

```python
import torch

generator = torch.Generator("cuda").manual_seed(0)
```

Now you can generate an image:

```python
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png">
</div>

This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16` or by running fewer inference steps.

Let's start by loading the model in `float16` and generating an image:

```python
import torch

pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png">
</div>

This time, it only took ~11 seconds to generate the image, which is nearly 3x faster than before!
<Tip>

💡 We strongly suggest always running your pipelines in `float16`; so far, we have rarely seen any degradation in output quality.

</Tip>

Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can help decrease the number of steps without sacrificing output quality. You can find schedulers that are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method:

```python
pipeline.scheduler.compatibles
[
    diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
    diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
    diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
    diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
    diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
    diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
    diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
    diffusers.schedulers.scheduling_pndm.PNDMScheduler,
    diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```

The Stable Diffusion model uses the [`PNDMScheduler`] by default, which usually requires ~50 inference steps, but a more performant scheduler like [`DPMSolverMultistepScheduler`] only requires ~20 or 25 inference steps. Use the [`ConfigMixin.from_config`] method to load a new scheduler:

```python
from diffusers import DPMSolverMultistepScheduler

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```

Now set `num_inference_steps` to 20:

```python
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png">
</div>

Great, you've managed to cut the inference time down to just 4 seconds! ⚡️

## Memory

The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try different batch sizes until you hit an `OutOfMemoryError` (OOM).

Create a function that'll generate a batch of images from a list of prompts and `Generators`. Make sure to assign each `Generator` a seed so you can reuse it if it produces a good result.

```python
def get_inputs(batch_size=1):
    generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
    prompts = batch_size * [prompt]
    num_inference_steps = 20

    return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}
```

Start with `batch_size=4` and see how much memory you've consumed:

```python
from diffusers.utils import make_image_grid

images = pipeline(**get_inputs(batch_size=4)).images
make_image_grid(images, 2, 2)
```

Unless you have a GPU with more RAM, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is use the [`~DiffusionPipeline.enable_attention_slicing`] function:

```python
pipeline.enable_attention_slicing()
```

Now try increasing the `batch_size` to 8!
```python
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png">
</div>

Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images at ~3.5 seconds per image! This is probably the fastest you can go on a T4 GPU without sacrificing quality.

## Quality

In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reduce the number of inference steps by using a more performant scheduler, and enable attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of the generated images.

### Better checkpoints

The most obvious step is to use better checkpoints. The Stable Diffusion model is a good starting point, and several improved versions have been released since its official launch. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself, and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.

As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Try exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and the [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!

### Better pipeline components

You can also try replacing the current pipeline components with a newer version. Let's load the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline and generate some images:

```python
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
pipeline.vae = vae
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png">
</div>

### Better prompt engineering

The text you use to generate an image is super important, so much so that an entire field has grown around it called *prompt engineering*. Some considerations for prompt engineering are:

- How is the image, or images similar to the one I want to generate, stored on the internet?
- What additional detail can I give that steers the model towards the style I want?
With this in mind, let's improve the prompt to include color and higher quality details:

```python
prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta"
```

Generate a batch of images with the new prompt:

```python
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png">
</div>

Pretty good! Let's tweak the second image - corresponding to the `Generator` with a seed of `1` - a bit more by adding some text about the age of the subject:

```python
prompts = [
    "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
]

generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))]
images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images
make_image_grid(images, 2, 2)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png">
</div>

## Next steps

In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency, and how to improve the quality of the generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:

- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield a 5 - 300% speedup in generation. On an A100 GPU, image generation can be up to 50% faster!
- If you can't use PyTorch 2, we recommend installing [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works well with PyTorch 1.13.1 for both faster speed and reduced memory consumption.
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16).
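
To give a taste of the first bullet, here is a minimal sketch (an assumption on top of this tutorial: PyTorch 2.0 is installed, and the `pipeline` and `get_inputs` defined above are still available) of compiling the UNet, which is typically where most of the speedup comes from:

```python
# a minimal sketch, assuming PyTorch 2.0 is installed; the first call is slow
# while the model compiles, and later calls with the same shapes run faster
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
images = pipeline(**get_inputs(batch_size=8)).images
```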
diffusers/docs/source/ja/stable_diffusion.md/0
{ "file_path": "diffusers/docs/source/ja/stable_diffusion.md", "repo_id": "diffusers", "token_count": 6242 }
96
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Accelerated PyTorch 2.0 support in Diffusers

Starting with version `0.13.0`, Diffusers supports the latest optimizations from [PyTorch 2.0](https://pytorch.org/get-started/pytorch-2.0/). These include:

1. Support for accelerated transformers with memory-efficient attention - no additional dependencies such as `xformers` required
2. Support for [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) to compile individual models for extra performance gains

## Installation

To use the accelerated attention implementation and `torch.compile()`, make sure you have the latest version of PyTorch 2.0 installed from pip, and that you are on diffusers 0.13.0 or later. As explained below, diffusers automatically uses the optimized attention processor ([`AttnProcessor2_0`](https://github.com/huggingface/diffusers/blob/1a5797c6d4491a879ea5285c4efc377664e0332d/src/diffusers/models/attention_processor.py#L798)) when PyTorch 2.0 is available.

```bash
pip install --upgrade torch diffusers
```

## Using accelerated transformers and `torch.compile`

1. **Accelerated transformers implementation**

PyTorch 2.0 includes an optimized, memory-efficient attention implementation through the [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) function, which automatically enables several optimizations depending on the input and GPU type. This is similar to the `memory_efficient_attention` from [xFormers](https://github.com/facebookresearch/xformers), but built natively into PyTorch.

These optimizations are enabled by default in Diffusers when PyTorch 2.0 is installed and `torch.nn.functional.scaled_dot_product_attention` is available. To use them, just install `torch 2.0` and use the pipeline. For example:

```Python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
```

If you want to enable it explicitly (which is not required), you can do so as shown below:

```diff
  import torch
  from diffusers import DiffusionPipeline
+ from diffusers.models.attention_processor import AttnProcessor2_0

  pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
+ pipe.unet.set_attn_processor(AttnProcessor2_0())

  prompt = "a photo of an astronaut riding a horse on mars"
  image = pipe(prompt).images[0]
```

This should be as fast and memory-efficient as `xFormers`. See the [benchmark](#benchmark) for more details.

If you need to make the pipeline more deterministic, or if you need to convert a fine-tuned model to other formats such as [Core ML](https://huggingface.co/docs/diffusers/v0.16.0/en/optimization/coreml#how-to-run-stable-diffusion-with-core-ml), you can revert to the vanilla attention processor ([`AttnProcessor`](https://github.com/huggingface/diffusers/blob/1a5797c6d4491a879ea5285c4efc377664e0332d/src/diffusers/models/attention_processor.py#L402)).
To use the vanilla attention processor, call the [`~diffusers.UNet2DConditionModel.set_default_attn_processor`] function:

```Python
import torch
from diffusers import DiffusionPipeline
from diffusers.models.attention_processor import AttnProcessor

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
pipe.unet.set_default_attn_processor()

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
```

2. **torch.compile**

To get an additional speedup, you can use the new `torch.compile` feature. Since the UNet of the pipeline is usually the most computationally expensive part, we wrap the `unet` with `torch.compile` and leave the rest of the submodels (the text encoder and VAE) as they are. For more details and other options, refer to the [torch compile docs](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html).

```python
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
images = pipe(prompt, num_inference_steps=steps, num_images_per_prompt=batch_size).images
```

Depending on the GPU type, `compile()` can yield an _additional speedup_ of **5% - 300%** on top of the accelerated transformer optimizations. Note, however, that compilation tends to bring bigger performance gains on more recent GPU architectures such as Ampere (A100, 3090), Ada (4090), and Hopper (H100).

Compilation takes some time to complete, so it is best suited for situations where you prepare the pipeline once and then repeatedly perform the same type of inference. Calling the compiled pipeline on a different image size re-triggers compilation, which can be expensive.

## Benchmark

We conducted a comprehensive benchmark with PyTorch 2.0's efficient attention implementation and `torch.compile` across the five most commonly used pipelines, on different GPUs and batch sizes. We used `diffusers 0.17.0.dev0`, which [makes sure `torch.compile()` is leveraged optimally](https://github.com/huggingface/diffusers/pull/3313).

### Benchmarking code

#### Stable Diffusion text-to-image

```python
from diffusers import DiffusionPipeline
import torch

path = "runwayml/stable-diffusion-v1-5"

run_compile = True  # Set True / False

pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.unet.to(memory_format=torch.channels_last)

if run_compile:
    print("Run torch compile")
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"

for _ in range(3):
    images = pipe(prompt=prompt).images
```

#### Stable Diffusion image-to-image

```python
from diffusers import StableDiffusionImg2ImgPipeline
import requests
import torch
from PIL import Image
from io import BytesIO

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((512, 512))

path = "runwayml/stable-diffusion-v1-5"

run_compile = True  # Set True / False

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.unet.to(memory_format=torch.channels_last)

if run_compile:
    print("Run torch compile")
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

prompt = "ghibli style, a fantasy landscape with castles"

for _ in range(3):
    image = pipe(prompt=prompt, image=init_image).images[0]
```

#### Stable Diffusion - inpainting

```python
from diffusers import StableDiffusionInpaintPipeline
import requests
import torch
from PIL import Image
from io import BytesIO

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

def download_image(url):
    response = requests.get(url)
    return Image.open(BytesIO(response.content)).convert("RGB")
"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = download_image(img_url).resize((512, 512)) mask_image = download_image(mask_url).resize((512, 512)) path = "runwayml/stable-diffusion-inpainting" run_compile = True # Set True / False pipe = StableDiffusionInpaintPipeline.from_pretrained(path, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.unet.to(memory_format=torch.channels_last) if run_compile: print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) prompt = "ghibli style, a fantasy landscape with castles" for _ in range(3): image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] ``` #### ControlNet ```python from diffusers import StableDiffusionControlNetPipeline, ControlNetModel import requests import torch from PIL import Image from io import BytesIO url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") init_image = init_image.resize((512, 512)) path = "runwayml/stable-diffusion-v1-5" run_compile = True # Set True / False controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( path, controlnet=controlnet, torch_dtype=torch.float16 ) pipe = pipe.to("cuda") pipe.unet.to(memory_format=torch.channels_last) pipe.controlnet.to(memory_format=torch.channels_last) if run_compile: print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) prompt = "ghibli style, a fantasy landscape with castles" for _ in range(3): image = pipe(prompt=prompt, image=init_image).images[0] ``` #### IF text-to-image + upscaling ```python from diffusers import DiffusionPipeline import torch run_compile = True # Set True / False pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16) pipe.to("cuda") pipe_2 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-II-M-v1.0", variant="fp16", text_encoder=None, torch_dtype=torch.float16) pipe_2.to("cuda") pipe_3 = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16) pipe_3.to("cuda") pipe.unet.to(memory_format=torch.channels_last) pipe_2.unet.to(memory_format=torch.channels_last) pipe_3.unet.to(memory_format=torch.channels_last) if run_compile: pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe_2.unet = torch.compile(pipe_2.unet, mode="reduce-overhead", fullgraph=True) pipe_3.unet = torch.compile(pipe_3.unet, mode="reduce-overhead", fullgraph=True) prompt = "the blue hulk" prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16) neg_prompt_embeds = torch.randn((1, 2, 4096), dtype=torch.float16) for _ in range(3): image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images image_2 = pipe_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=neg_prompt_embeds, output_type="pt").images image_3 = pipe_3(prompt=prompt, image=image, noise_level=100).images 
```

To give you a pictorial overview of the possible speed-ups that can be obtained with PyTorch 2.0 and `torch.compile()`, here is a plot that shows relative speed-ups for the [Stable Diffusion text-to-image pipeline](StableDiffusionPipeline) across five different GPU families (with a batch size of 4):

![t2i_speedup](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/pt2_benchmarks/t2i_speedup.png)

To give you an even better idea of how this speed-up holds for the other pipelines presented above, consider the following plot that shows the benchmarking numbers from an A100 across three different batch sizes (with PyTorch 2.0 nightly and `torch.compile()`):

![a100_numbers](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/pt2_benchmarks/a100_numbers.png)

_(The benchmarking metric in the charts above is **iterations/second**.)_

But we reveal all the benchmarking numbers in the interest of transparency! In the following tables, we report the results in terms of **_iterations processed per second_**.

### A100 (batch size: 1)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 21.66 | 23.13 | 44.03 | 49.74 |
| SD - img2img | 21.81 | 22.40 | 43.92 | 46.32 |
| SD - inpaint | 22.24 | 23.23 | 43.76 | 49.25 |
| SD - controlnet | 15.02 | 15.82 | 32.13 | 36.08 |
| IF | 20.21 / <br>13.84 / <br>24.00 | 20.12 / <br>13.70 / <br>24.03 | ❌ | 97.34 / <br>27.23 / <br>111.66 |

### A100 (batch size: 4)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 11.6 | 13.12 | 14.62 | 17.27 |
| SD - img2img | 11.47 | 13.06 | 14.66 | 17.25 |
| SD - inpaint | 11.67 | 13.31 | 14.88 | 17.48 |
| SD - controlnet | 8.28 | 9.38 | 10.51 | 12.41 |
| IF | 25.02 | 18.04 | ❌ | 48.47 |

### A100 (batch size: 16)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 3.04 | 3.6 | 3.83 | 4.68 |
| SD - img2img | 2.98 | 3.58 | 3.83 | 4.67 |
| SD - inpaint | 3.04 | 3.66 | 3.9 | 4.76 |
| SD - controlnet | 2.15 | 2.58 | 2.74 | 3.35 |
| IF | 8.78 | 9.82 | ❌ | 16.77 |

### V100 (batch size: 1)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 18.99 | 19.14 | 20.95 | 22.17 |
| SD - img2img | 18.56 | 19.18 | 20.95 | 22.11 |
| SD - inpaint | 19.14 | 19.06 | 21.08 | 22.20 |
| SD - controlnet | 13.48 | 13.93 | 15.18 | 15.88 |
| IF | 20.01 / <br>9.08 / <br>23.34 | 19.79 / <br>8.98 / <br>24.10 | ❌ | 55.75 / <br>11.57 / <br>57.67 |

### V100 (batch size: 4)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 5.96 | 5.89 | 6.83 | 6.86 |
| SD - img2img | 5.90 | 5.91 | 6.81 | 6.82 |
| SD - inpaint | 5.99 | 6.03 | 6.93 | 6.95 |
| SD - controlnet | 4.26 | 4.29 | 4.92 | 4.93 |
| IF | 15.41 | 14.76 | ❌ | 22.95 |

### V100 (batch size: 16)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 1.66 | 1.66 | 1.92 | 1.90 |
| SD - img2img | 1.65 | 1.65 | 1.91 | 1.89 |
| SD - inpaint | 1.69 | 1.69 | 1.95 | 1.93 |
| SD - controlnet | 1.19 | 1.19 | OOM after warmup | 1.36 |
| IF | 5.43 | 5.29 | ❌ | 7.06 |

### T4 (batch size: 1)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 6.9 | 6.95 | 7.3 | 7.56 |
| SD - img2img | 6.84 | 6.99 | 7.04 | 7.55 |
| SD - inpaint | 6.91 | 6.7 | 7.01 | 7.37 |
| SD - controlnet | 4.89 | 4.86 | 5.35 | 5.48 |
| IF | 17.42 / <br>2.47 / <br>18.52 | 16.96 / <br>2.45 / <br>18.69 | ❌ | 24.63 / <br>2.47 / <br>23.39 |

### T4 (batch size: 4)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 1.79 | 1.79 | 2.03 | 1.99 |
| SD - img2img | 1.77 | 1.77 | 2.05 | 2.04 |
| SD - inpaint | 1.81 | 1.82 | 2.09 | 2.09 |
| SD - controlnet | 1.34 | 1.27 | 1.47 | 1.46 |
| IF | 5.79 | 5.61 | ❌ | 7.39 |

### T4 (batch size: 16)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 2.34s | 2.30s | OOM after 2nd iteration | 1.99s |
| SD - img2img | 2.35s | 2.31s | OOM after warmup | 2.00s |
| SD - inpaint | 2.30s | 2.26s | OOM after 2nd iteration | 1.95s |
| SD - controlnet | OOM after 2nd iteration | OOM after 2nd iteration | OOM after warmup | OOM after warmup |
| IF * | 1.44 | 1.44 | ❌ | 1.94 |

### RTX 3090 (batch size: 1)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 22.56 | 22.84 | 23.84 | 25.69 |
| SD - img2img | 22.25 | 22.61 | 24.1 | 25.83 |
| SD - inpaint | 22.22 | 22.54 | 24.26 | 26.02 |
| SD - controlnet | 16.03 | 16.33 | 17.38 | 18.56 |
| IF | 27.08 / <br>9.07 / <br>31.23 | 26.75 / <br>8.92 / <br>31.47 | ❌ | 68.08 / <br>11.16 / <br>65.29 |

### RTX 3090 (batch size: 4)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 6.46 | 6.35 | 7.29 | 7.3 |
| SD - img2img | 6.33 | 6.27 | 7.31 | 7.26 |
| SD - inpaint | 6.47 | 6.4 | 7.44 | 7.39 |
| SD - controlnet | 4.59 | 4.54 | 5.27 | 5.26 |
| IF | 16.81 | 16.62 | ❌ | 21.57 |

### RTX 3090 (batch size: 16)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 1.7 | 1.69 | 1.93 | 1.91 |
| SD - img2img | 1.68 | 1.67 | 1.93 | 1.9 |
| SD - inpaint | 1.72 | 1.71 | 1.97 | 1.94 |
| SD - controlnet | 1.23 | 1.22 | 1.4 | 1.38 |
| IF | 5.01 | 5.00 | ❌ | 6.33 |

### RTX 4090 (batch size: 1)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 40.5 | 41.89 | 44.65 | 49.81 |
| SD - img2img | 40.39 | 41.95 | 44.46 | 49.8 |
| SD - inpaint | 40.51 | 41.88 | 44.58 | 49.72 |
| SD - controlnet | 29.27 | 30.29 | 32.26 | 36.03 |
| IF | 69.71 / <br>18.78 / <br>85.49 | 69.13 / <br>18.80 / <br>85.56 | ❌ | 124.60 / <br>26.37 / <br>138.79 |

### RTX 4090 (batch size: 4)
| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 12.62 | 12.84 | 15.32 | 15.59 |
| SD - img2img | 12.61 | 12.79 | 15.35 | 15.66 |
| SD - inpaint | 12.65 | 12.81 | 15.3 | 15.58 |
| SD - controlnet | 9.1 | 9.25 | 11.03 | 11.22 |
| IF | 31.88 | 31.14 | ❌ | 43.92 |

### RTX 4090 (batch size: 16)

| **Pipeline** | **torch 2.0 - <br>no compile** | **torch nightly - <br>no compile** | **torch 2.0 - <br>compile** | **torch nightly - <br>compile** |
|:---:|:---:|:---:|:---:|:---:|
| SD - txt2img | 3.17 | 3.2 | 3.84 | 3.85 |
| SD - img2img | 3.16 | 3.2 | 3.84 | 3.85 |
| SD - inpaint | 3.17 | 3.2 | 3.85 | 3.85 |
| SD - controlnet | 2.23 | 2.3 | 2.7 | 2.75 |
| IF | 9.26 | 9.2 | ❌ | 13.31 |

## Notes

* Follow [this PR](https://github.com/huggingface/diffusers/pull/3313) for more details on the environment used for conducting the benchmarks.
* For the IF pipeline and batch sizes > 1, we only used a batch size of > 1 in the first IF pipeline for text-to-image generation and NOT for upscaling. That means the two upscaling pipelines received a batch size of 1.

*Thanks to [Horace He](https://github.com/Chillee) from the PyTorch team for their support in improving our support of `torch.compile()` in Diffusers.*
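
If you want to reproduce numbers like the ones above on your own hardware, a minimal timing harness might look like the following (a hypothetical helper for illustration, not the exact script from the PR linked above):

```python
import time
import torch

def benchmark(pipe, prompt, batch_size=4, num_warmup=2, num_runs=3):
    # warmup iterations so torch.compile has finished compiling before timing
    for _ in range(num_warmup):
        pipe(prompt, num_images_per_prompt=batch_size)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(num_runs):
        pipe(prompt, num_images_per_prompt=batch_size)
    torch.cuda.synchronize()
    # average seconds per pipeline call
    return (time.perf_counter() - start) / num_runs
```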
diffusers/docs/source/ko/optimization/torch2.0.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/torch2.0.md", "repo_id": "diffusers", "token_count": 10791 }
97
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

[[open-in-colab]]

# Train a diffusion model

Unconditional image generation is a popular application of diffusion models that generates images resembling those in the dataset used for training. Typically, the best results are obtained by finetuning a pretrained model on a particular dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own!

This tutorial will teach you how to train a [`UNet2DModel`] on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own 🦋 butterflies 🦋.

<Tip>

💡 This training tutorial is based on the [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details about how diffusion models work, check out the notebook!

</Tip>

Before you begin, make sure you have 🤗 Datasets installed to load and preprocess image datasets, and 🤗 Accelerate to simplify training on any number of GPUs. Also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics. (You can also use [Weights & Biases](https://docs.wandb.ai/) to track your training.)

```bash
!pip install diffusers[training]
```

We encourage you to share your model with the community. To do so, you'll need to log in to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can log in from a notebook and enter your token when prompted:

```py
>>> from huggingface_hub import notebook_login

>>> notebook_login()
```

Or log in from a terminal:

```bash
huggingface-cli login
```

Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files:

```bash
!sudo apt -qq install git-lfs
!git config --global credential.helper store
```

## Training configuration

For convenience, create a `TrainingConfig` class containing the training parameters (feel free to adjust them):

```py
>>> from dataclasses import dataclass


>>> @dataclass
... class TrainingConfig:
...     image_size = 128  # the generated image resolution
...     train_batch_size = 16
...     eval_batch_size = 16  # how many images to sample during evaluation
...     num_epochs = 50
...     gradient_accumulation_steps = 1
...     learning_rate = 1e-4
...     lr_warmup_steps = 500
...     save_image_epochs = 10
...     save_model_epochs = 30
...     mixed_precision = "fp16"  # `no` for float32, `fp16` for automatic mixed precision
...     output_dir = "ddpm-butterflies-128"  # the model name locally and on the HF Hub
...     push_to_hub = True  # whether to upload the saved model to the HF Hub
...     hub_private_repo = False
...     overwrite_output_dir = True  # overwrite the old model when re-running the notebook
...     seed = 0


>>> config = TrainingConfig()
```

## Load the dataset

You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the 🤗 Datasets library:

```py
>>> from datasets import load_dataset

>>> config.dataset_name = "huggan/smithsonian_butterflies_subset"
>>> dataset = load_dataset(config.dataset_name, split="train")
```

💡 You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan), or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or to `imagefolder` if you're using your own images, as sketched below.
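
For example, a minimal sketch of the `imagefolder` case (the `data_dir` path below is hypothetical - point it at your own image directory):

```py
>>> from datasets import load_dataset

>>> # load images from a local folder instead of the Hub dataset
>>> config.dataset_name = "imagefolder"
>>> dataset = load_dataset("imagefolder", data_dir="path/to/your/images", split="train")
```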
🤗 Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html), which we can visualize:

```py
>>> import matplotlib.pyplot as plt

>>> fig, axs = plt.subplots(1, 4, figsize=(16, 4))
>>> for i, image in enumerate(dataset[:4]["image"]):
...     axs[i].imshow(image)
...     axs[i].set_axis_off()
>>> fig.show()
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png)

The images are all different sizes though, so you'll need to preprocess them first:

- `Resize` changes the image size to the one defined in `config.image_size`.
- `RandomHorizontalFlip` augments the dataset by randomly mirroring the images.
- `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects.

```py
>>> from torchvision import transforms

>>> preprocess = transforms.Compose(
...     [
...         transforms.Resize((config.image_size, config.image_size)),
...         transforms.RandomHorizontalFlip(),
...         transforms.ToTensor(),
...         transforms.Normalize([0.5], [0.5]),
...     ]
... )
```

Use 🤗 Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training:

```py
>>> def transform(examples):
...     images = [preprocess(image.convert("RGB")) for image in examples["image"]]
...     return {"images": images}


>>> dataset.set_transform(transform)
```

Feel free to visualize the images again to confirm that they've been resized. Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training!

```py
>>> import torch

>>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)
```

## Create a UNet2DModel

Pretrained models in 🧨 Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]:

```py
>>> from diffusers import UNet2DModel

>>> model = UNet2DModel(
...     sample_size=config.image_size,  # the target image resolution
...     in_channels=3,  # the number of input channels, 3 for RGB images
...     out_channels=3,  # the number of output channels
...     layers_per_block=2,  # how many ResNet layers to use per UNet block
...     block_out_channels=(128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
...     down_block_types=(
...         "DownBlock2D",  # a regular ResNet downsampling block
...         "DownBlock2D",
...         "DownBlock2D",
...         "DownBlock2D",
...         "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
...         "DownBlock2D",
...     ),
...     up_block_types=(
...         "UpBlock2D",  # a regular ResNet upsampling block
...         "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
...         "UpBlock2D",
...         "UpBlock2D",
...         "UpBlock2D",
...         "UpBlock2D",
...     ),
... )
```

It is often a good idea to quickly check that the sample image shape matches the model output shape:

```py
>>> sample_image = dataset[0]["images"].unsqueeze(0)
>>> print("Input shape:", sample_image.shape)
Input shape: torch.Size([1, 3, 128, 128])

>>> print("Output shape:", model(sample_image, timestep=0).sample.shape)
Output shape: torch.Size([1, 3, 128, 128])
```

Great! Next, you'll need a scheduler to add some noise to the image.

## Create a scheduler

The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates an image from the noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*.
Let's look at the [`DDPMScheduler`] and use its `add_noise` method to add some random noise to the `sample_image` from before:

```py
>>> import torch
>>> from PIL import Image
>>> from diffusers import DDPMScheduler

>>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
>>> noise = torch.randn(sample_image.shape)
>>> timesteps = torch.LongTensor([50])
>>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps)

>>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0])
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png)

The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by:

```py
>>> import torch.nn.functional as F

>>> noise_pred = model(noisy_image, timesteps).sample
>>> loss = F.mse_loss(noise_pred, noise)
```

## Train the model

By now, you have most of the pieces needed to start training the model, and all that's left is putting everything together.

First, you'll need an optimizer and a learning rate scheduler:

```py
>>> from diffusers.optimization import get_cosine_schedule_with_warmup

>>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
>>> lr_scheduler = get_cosine_schedule_with_warmup(
...     optimizer=optimizer,
...     num_warmup_steps=config.lr_warmup_steps,
...     num_training_steps=(len(train_dataloader) * config.num_epochs),
... )
```

Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save them as a grid:

```py
>>> from diffusers import DDPMPipeline
>>> import math
>>> import os


>>> def make_grid(images, rows, cols):
...     w, h = images[0].size
...     grid = Image.new("RGB", size=(cols * w, rows * h))
...     for i, image in enumerate(images):
...         grid.paste(image, box=(i % cols * w, i // cols * h))
...     return grid


>>> def evaluate(config, epoch, pipeline):
...     # Sample some images from random noise (this is the backward diffusion process).
...     # The default pipeline output type is `List[PIL.Image]`.
...     images = pipeline(
...         batch_size=config.eval_batch_size,
...         generator=torch.manual_seed(config.seed),
...     ).images
...     # Make a grid out of the images
...     image_grid = make_grid(images, rows=4, cols=4)
...     # Save the images
...     test_dir = os.path.join(config.output_dir, "samples")
...     os.makedirs(test_dir, exist_ok=True)
...     image_grid.save(f"{test_dir}/{epoch:04d}.png")
```

Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information, and then push everything to the Hub.

💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images already, feel free to copy and run the code below. 🤗

```py
>>> from accelerate import Accelerator
>>> from huggingface_hub import create_repo, upload_folder
>>> from tqdm.auto import tqdm
>>> from pathlib import Path
>>> import os


>>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
...     # Initialize accelerator and tensorboard logging
...     accelerator = Accelerator(
...         mixed_precision=config.mixed_precision,
...         gradient_accumulation_steps=config.gradient_accumulation_steps,
...         log_with="tensorboard",
...         project_dir=os.path.join(config.output_dir, "logs"),
...     )
...     if accelerator.is_main_process:
...         if config.output_dir is not None:
...             os.makedirs(config.output_dir, exist_ok=True)
...         if config.push_to_hub:
...             repo_id = create_repo(
...                 repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True
...             ).repo_id
...         accelerator.init_trackers("train_example")
...     # Prepare everything.
...     # There is no specific order to remember; you just need to unpack the
...     # objects in the same order you gave them to the prepare method.
...     model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
...         model, optimizer, train_dataloader, lr_scheduler
...     )
...     global_step = 0
...     # Now you train the model
...     for epoch in range(config.num_epochs):
...         progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
...         progress_bar.set_description(f"Epoch {epoch}")
...         for step, batch in enumerate(train_dataloader):
...             clean_images = batch["images"]
...             # Sample noise to add to the images
...             noise = torch.randn(clean_images.shape, device=clean_images.device)
...             bs = clean_images.shape[0]
...             # Sample a random timestep for each image
...             timesteps = torch.randint(
...                 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device,
...                 dtype=torch.int64
...             )
...             # Add noise to the clean images according to the noise magnitude at each timestep
...             # (this is the forward diffusion process)
...             noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
...             with accelerator.accumulate(model):
...                 # Predict the noise residual
...                 noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
...                 loss = F.mse_loss(noise_pred, noise)
...                 accelerator.backward(loss)
...                 accelerator.clip_grad_norm_(model.parameters(), 1.0)
...                 optimizer.step()
...                 lr_scheduler.step()
...                 optimizer.zero_grad()
...             progress_bar.update(1)
...             logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
...             progress_bar.set_postfix(**logs)
...             accelerator.log(logs, step=global_step)
...             global_step += 1
...         # After each epoch, optionally sample some demo images with evaluate() and save the model
...         if accelerator.is_main_process:
...             pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
...             if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
...                 evaluate(config, epoch, pipeline)
...             if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
...                 if config.push_to_hub:
...                     upload_folder(
...                         repo_id=repo_id,
...                         folder_path=config.output_dir,
...                         commit_message=f"Epoch {epoch}",
...                         ignore_patterns=["step_*", "epoch_*"],
...                     )
...                 else:
...                     pipeline.save_pretrained(config.output_dir)
```

Phew, that was quite a bit of code! But you're finally ready to launch the training with 🤗 Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes to use for training (you can change this value to the number of GPUs available to you):

```py
>>> from accelerate import notebook_launcher

>>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)

>>> notebook_launcher(train_loop, args, num_processes=1)
```

Once training is complete, take a look at the final 🦋 images 🦋 generated by your diffusion model!

```py
>>> import glob

>>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png"))
>>> Image.open(sample_images[-1])
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png)

## Next steps

Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn:

- [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
- [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
- [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset.
- [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
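
Before moving on, here is a minimal sketch (assuming the `output_dir` from the `TrainingConfig` above) of reloading the saved pipeline for inference:

```py
>>> from diffusers import DDPMPipeline

>>> # reload the pipeline saved by `pipeline.save_pretrained(config.output_dir)`
>>> pipeline = DDPMPipeline.from_pretrained("ddpm-butterflies-128")
>>> image = pipeline(batch_size=1).images[0]
```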
diffusers/docs/source/ko/tutorials/basic_training.md/0
{ "file_path": "diffusers/docs/source/ko/tutorials/basic_training.md", "repo_id": "diffusers", "token_count": 11286 }
98
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Improve image quality with deterministic generation

A common way to improve the quality of generated images is with *deterministic batch generation*: generate a batch of images and select one image to improve with a more detailed prompt in a second round of inference. The key is to pass a list of [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator)s to the pipeline for batched image generation, and tie each `Generator` to a seed so you can reuse it for an image.

Let's use [`runwayml/stable-diffusion-v1-5`](runwayml/stable-diffusion-v1-5) and generate several versions of the following prompt:

```py
prompt = "Labrador in the style of Vermeer"
```

Instantiate a pipeline with [`DiffusionPipeline.from_pretrained`] and place it on a GPU (if available):

```python
>>> import torch
>>> from diffusers import DiffusionPipeline

>>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
```

Now, define four different `Generator`s and assign each `Generator` a seed (`0` to `3`) so you can reuse a `Generator` later for a specific image:

```python
>>> generator = [torch.Generator(device="cuda").manual_seed(i) for i in range(4)]
```

Generate the images and have a look:

```python
>>> images = pipe(prompt, generator=generator, num_images_per_prompt=4).images
>>> images
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds.jpg)

In this example, you'll improve upon the first image - but in reality, you can use any image you want (even the image with double sets of eyes!). The first image used the `Generator` with seed `0`, so you'll reuse that `Generator` for the second round of inference. To improve the quality of the image, add some additional text to the prompt:

```python
prompt = [prompt + t for t in [", highly realistic", ", artsy", ", trending", ", colorful"]]
generator = [torch.Generator(device="cuda").manual_seed(0) for i in range(4)]
```

Create four generators with seed `0`, and generate another batch of images, all of which should look like the first image from the previous round!

```python
>>> images = pipe(prompt, generator=generator).images
>>> images
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds_2.jpg)
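
One practical extension (not part of the original guide) is to record which seed produced which image, so the right `Generator` is easy to recreate later - for example, rerunning the first round while saving each candidate under its seed:

```python
>>> prompt = "Labrador in the style of Vermeer"
>>> seeds = list(range(4))
>>> generator = [torch.Generator(device="cuda").manual_seed(s) for s in seeds]
>>> images = pipe(prompt, generator=generator, num_images_per_prompt=4).images
>>> for s, img in zip(seeds, images):
...     img.save(f"candidate_seed_{s}.png")  # the filename records the seed
```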
diffusers/docs/source/ko/using-diffusers/reusing_seeds.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/reusing_seeds.md", "repo_id": "diffusers", "token_count": 1923 }
99
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Effective and efficient diffusion

[[open-in-colab]]

Getting the [`DiffusionPipeline`] to generate images in a certain style or include what you want can be tricky. Often, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something from nothing is a computationally intensive process, especially if you're running inference over and over again.

This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline to reduce the time between inference cycles so you can iterate faster.

This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`].

Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:

```python
from diffusers import DiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True)
```

The example prompt used in this tutorial is `portrait photo of a old warrior chief`, but feel free to use your own:

```python
prompt = "portrait photo of a old warrior chief"
```

## Speed

<Tip>

💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!

</Tip>

One of the simplest ways to speed up image generation is to place the pipeline on a GPU, the same way you would with any PyTorch module:

```python
pipeline = pipeline.to("cuda")
```

To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):

```python
import torch

generator = torch.Generator("cuda").manual_seed(0)
```

Now you can generate an image:

```python
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png">
</div>

This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16` or by running fewer inference steps.

Let's start by loading the model in `float16` and generating an image:

```python
import torch

pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png">
</div>

This time, it only took ~11 seconds to generate the image, which is nearly 3x faster than before!
<Tip>

💡 We strongly recommend always running your pipelines in `float16`; so far, we've rarely seen any degradation in output quality.

</Tip>

Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can decrease the number of steps without sacrificing output quality. You can find the schedulers that are compatible with the current model by calling the `compatibles` method on the [`DiffusionPipeline`]:

```python
pipeline.scheduler.compatibles
[
    diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
    diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
    diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
    diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
    diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
    diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
    diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
    diffusers.schedulers.scheduling_pndm.PNDMScheduler,
    diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```

The Stable Diffusion model uses the [`PNDMScheduler`] by default, which usually requires around 50 inference steps, but a more performant scheduler like the [`DPMSolverMultistepScheduler`] only needs around 20 or 25. Use the [`ConfigMixin.from_config`] method to load the new scheduler:

```python
from diffusers import DPMSolverMultistepScheduler

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```

Now set `num_inference_steps` to 20:

```python
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png">
</div>

Great, you've managed to cut the inference time down to just 4 seconds! ⚡️

## Memory

The other key to improving pipeline performance is using less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to find out how many images you can generate at once is to try different batch sizes until you hit an `OutOfMemoryError` (OOM).

Create a function that assigns prompts and `Generator`s to each batch of images to generate. Make sure to assign each `Generator` a seed so you can reproduce a good result.

```python
def get_inputs(batch_size=1):
    generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
    prompts = batch_size * [prompt]
    num_inference_steps = 20

    return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}
```

Set `batch_size=4` and see how much memory we consume:

```python
from diffusers.utils import make_image_grid

images = pipeline(**get_inputs(batch_size=4)).images
make_image_grid(images, 2, 2)
```

Unless you have a GPU with more memory, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Running this operation sequentially instead of in a batch saves a significant amount of memory. All you have to do is enable the [`~DiffusionPipeline.enable_attention_slicing`] function on the pipeline:

```python
pipeline.enable_attention_slicing()
```

Now try increasing `batch_size` to 8!

```python
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png">
</div>

Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 at roughly 3.5 seconds per image! This is probably the fastest a T4 GPU can go without sacrificing quality.

## Quality

In the last two sections, you learned how to optimize the speed of your pipeline with `fp16`, reduce the number of inference steps with a more performant scheduler, and save memory with attention slicing. Now you're going to focus on how to improve the quality of the generated images.

### Better checkpoints

The most obvious step is to use better checkpoints. The Stable Diffusion model is a good starting point, and several improved versions have been released since its official launch. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.

As the field grows, there are more and more high-quality, fine-tuned checkpoints for generating different styles.
Have a look around the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and the [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!

### Better pipeline components

You can also try replacing the current pipeline components with newer versions. Let's load the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline and generate some images:

```python
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
pipeline.vae = vae
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png">
</div>

### Better prompt engineering

The text you use to generate an image is extremely important, so much so that it's called *prompt engineering*. Some things to consider when designing a prompt are:

- How is the image, or images similar to the one I want, stored on the internet?
- What additional detail can I provide to steer the model toward the style I want?

With this in mind, let's improve the prompt to include color and higher-quality details:

```python
prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta"
```

Generate a batch of images with the new prompt:

```python
images = pipeline(**get_inputs(batch_size=8)).images
make_image_grid(images, rows=2, cols=4)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png">
</div>

Pretty impressive! Let's tweak the second image, which corresponds to the `Generator` with a seed of `1`, by adding some text about the age of the subject:

```python
prompts = [
    "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
    "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
]

generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))]
images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images
make_image_grid(images, 2, 2)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png">
</div>

## Next steps

In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency, as well as how to improve the quality of generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:

- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5 - 300% faster inference. On an A100 GPU, inference can be up to 50% faster!
- If you can't use PyTorch 2, we recommend installing [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption.
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16).
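To wrap up, here is one way the individual optimizations from this tutorial could be combined into a single setup. This is a sketch rather than an official recipe; it only reuses the model ids and calls shown above:

```python
import torch

from diffusers import AutoencoderKL, DiffusionPipeline, DPMSolverMultistepScheduler

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_safetensors=True)

# a more performant scheduler: ~20 steps instead of ~50
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

# an improved autoencoder
pipeline.vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

pipeline = pipeline.to("cuda")
pipeline.enable_attention_slicing()  # trade a little speed for a lot less memory

generator = torch.Generator("cuda").manual_seed(0)
image = pipeline("portrait photo of a old warrior chief", generator=generator, num_inference_steps=20).images[0]
```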
import inspect
from typing import Any, Dict, List, Optional, Union

import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

from diffusers import DiffusionPipeline
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import LoraLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    USE_PEFT_BACKEND,
    logging,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TranslatorBase(nn.Module):
    def __init__(self, num_tok, dim, dim_out, mult=2):
        super().__init__()
        self.dim_in = dim
        self.dim_out = dim_out
        self.net_tok = nn.Sequential(
            nn.Linear(num_tok, int(num_tok * mult)),
            nn.LayerNorm(int(num_tok * mult)),
            nn.GELU(),
            nn.Linear(int(num_tok * mult), int(num_tok * mult)),
            nn.LayerNorm(int(num_tok * mult)),
            nn.GELU(),
            nn.Linear(int(num_tok * mult), num_tok),
            nn.LayerNorm(num_tok),
        )
        self.net_sen = nn.Sequential(
            nn.Linear(dim, int(dim * mult)),
            nn.LayerNorm(int(dim * mult)),
            nn.GELU(),
            nn.Linear(int(dim * mult), int(dim * mult)),
            nn.LayerNorm(int(dim * mult)),
            nn.GELU(),
            nn.Linear(int(dim * mult), dim_out),
            nn.LayerNorm(dim_out),
        )

    def forward(self, x):
        if self.dim_in == self.dim_out:
            # residual connections are only valid when the input and output dims match
            identity_0 = x
            x = self.net_sen(x)
            x += identity_0
            x = x.transpose(1, 2)

            identity_1 = x
            x = self.net_tok(x)
            x += identity_1
            x = x.transpose(1, 2)
        else:
            x = self.net_sen(x)
            x = x.transpose(1, 2)

            x = self.net_tok(x)
            x = x.transpose(1, 2)
        return x


class TranslatorBaseNoLN(nn.Module):
    def __init__(self, num_tok, dim, dim_out, mult=2):
        super().__init__()
        self.dim_in = dim
        self.dim_out = dim_out
        self.net_tok = nn.Sequential(
            nn.Linear(num_tok, int(num_tok * mult)),
            nn.GELU(),
            nn.Linear(int(num_tok * mult), int(num_tok * mult)),
            nn.GELU(),
            nn.Linear(int(num_tok * mult), num_tok),
        )
        self.net_sen = nn.Sequential(
            nn.Linear(dim, int(dim * mult)),
            nn.GELU(),
            nn.Linear(int(dim * mult), int(dim * mult)),
            nn.GELU(),
            nn.Linear(int(dim * mult), dim_out),
        )

    def forward(self, x):
        if self.dim_in == self.dim_out:
            # residual connections are only valid when the input and output dims match
            identity_0 = x
            x = self.net_sen(x)
            x += identity_0
            x = x.transpose(1, 2)

            identity_1 = x
            x = self.net_tok(x)
            x += identity_1
            x = x.transpose(1, 2)
        else:
            x = self.net_sen(x)
            x = x.transpose(1, 2)

            x = self.net_tok(x)
            x = x.transpose(1, 2)
        return x


class TranslatorNoLN(nn.Module):
    def __init__(self, num_tok, dim, dim_out, mult=2, depth=5):
        super().__init__()
        self.blocks = nn.ModuleList([TranslatorBase(num_tok, dim, dim, mult=mult) for _ in range(depth)])
        self.gelu = nn.GELU()

        self.tail = TranslatorBaseNoLN(num_tok, dim, dim_out, mult=mult)

    def forward(self, x):
        for block in self.blocks:
            x = block(x) + x
            x = self.gelu(x)

        x = self.tail(x)
        return x


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class GlueGenStableDiffusionPipeline(DiffusionPipeline, LoraLoaderMixin): def __init__( self, vae: AutoencoderKL, text_encoder: AutoModel, tokenizer: AutoTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, language_adapter: TranslatorNoLN = None, tensor_norm: torch.FloatTensor = None, requires_safety_checker: bool = True, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, language_adapter=language_adapter, tensor_norm=tensor_norm, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def load_language_adapter( self, model_path: str, num_token: int, dim: int, dim_out: int, tensor_norm: torch.FloatTensor, mult: int = 2, depth: int = 5, ): device = self._execution_device self.tensor_norm = tensor_norm.to(device) self.language_adapter = TranslatorNoLN(num_tok=num_token, dim=dim, dim_out=dim_out, mult=mult, depth=depth).to( device ) self.language_adapter.load_state_dict(torch.load(model_path)) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def _adapt_language(self, prompt_embeds: torch.FloatTensor): prompt_embeds = prompt_embeds / 3 prompt_embeds = self.language_adapter(prompt_embeds) * (self.tensor_norm / 2) return prompt_embeds def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. 
If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) elif self.language_adapter is not None: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) # Run prompt language adapter if self.language_adapter is not None: prompt_embeds = self._adapt_language(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] # Run negative prompt language adapter if self.language_adapter is not None: negative_prompt_embeds = self._adapt_language(negative_prompt_embeds) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all 
schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stages where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. 
This is done to mitigate "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ if not hasattr(self, "unet"): raise ValueError("The pipeline must have `unet` for using FreeU.") self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) def disable_freeu(self): """Disables the FreeU mechanism if enabled.""" self.unet.disable_freeu() # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, clip_skip: Optional[int] = None, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. 
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # to deal with lora scaling and other possible forward hooks # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.2 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
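# --- Usage sketch (illustrative only, not part of the original pipeline) -----------------------
# A rough idea of how the pieces above could be wired together. The multilingual text encoder,
# adapter path, adapter dimensions, and tensor norm below are placeholders/assumptions; substitute
# the checkpoints you actually trained or downloaded.
#
# import torch
# from transformers import AutoModel, AutoTokenizer
# from diffusers import StableDiffusionPipeline
#
# base = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe = GlueGenStableDiffusionPipeline(
#     vae=base.vae,
#     text_encoder=AutoModel.from_pretrained("xlm-roberta-large"),  # assumed multilingual encoder
#     tokenizer=AutoTokenizer.from_pretrained("xlm-roberta-large"),  # assumed
#     unet=base.unet,
#     scheduler=base.scheduler,
#     safety_checker=base.safety_checker,
#     feature_extractor=base.feature_extractor,
# ).to("cuda")
# # A trained GlueGen adapter is required; the path, dims, and tensor norm here are placeholders.
# pipe.load_language_adapter(
#     "language_adapter.pt", num_token=77, dim=1024, dim_out=768, tensor_norm=torch.ones(768)
# )
# image = pipe("une photo d'un astronaute à cheval sur Mars").images[0]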
from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from diffusers import StableDiffusionImg2ImgPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline): debug_save = False @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: Union[ torch.FloatTensor, PIL.Image.Image, np.ndarray, List[torch.FloatTensor], List[PIL.Image.Image], List[np.ndarray], ] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, mask: Union[ torch.FloatTensor, PIL.Image.Image, np.ndarray, List[torch.FloatTensor], List[PIL.Image.Image], List[np.ndarray], ] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be used as the starting point. Can also accept image latents as `image`, but if passing latents directly it is not encoded again. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*): A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # code adapted from parent class StableDiffusionImg2ImgPipeline # 0. Check inputs. Raise error if not correct self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) # 1. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 2. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 3. Preprocess image image = self.image_processor.preprocess(image) # 4. 
set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 5. Prepare latent variables # it is sampled from the latent distribution of the VAE latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator ) # mean of the latent distribution init_latents = [ self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) # 6. create latent mask latent_mask = self._make_latent_mask(latents, mask) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if latent_mask is not None: latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask) noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": scaled = latents / self.vae.config.scaling_factor if latent_mask is not None: # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask) scaled = torch.lerp(init_latents, scaled, latent_mask) image = self.vae.decode(scaled, return_dict=False)[0] if self.debug_save: image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True]) image_gen[0].save("from_latent.png") image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return 
StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def _make_latent_mask(self, latents, mask): if mask is not None: latent_mask = [] if not isinstance(mask, list): tmp_mask = [mask] else: tmp_mask = mask _, l_channels, l_height, l_width = latents.shape for m in tmp_mask: if not isinstance(m, PIL.Image.Image): if len(m.shape) == 2: m = m[..., np.newaxis] if m.max() > 1: m = m / 255.0 m = self.image_processor.numpy_to_pil(m)[0] if m.mode != "L": m = m.convert("L") resized = self.image_processor.resize(m, l_height, l_width) if self.debug_save: resized.save("latent_mask.png") latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0)) latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents) latent_mask = latent_mask / latent_mask.max() return latent_mask
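# --- Usage sketch (illustrative only, not part of the original file) ---------------------------
# How this masked img2img pipeline might be invoked. The checkpoint name and the image/mask paths
# are placeholders; any non-zero pixel in `mask` marks a region to be repainted.
#
# import PIL.Image
# import torch
#
# pipe = MaskedStableDiffusionImg2ImgPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
# ).to("cuda")
# init_image = PIL.Image.open("input.png").convert("RGB")  # placeholder path
# mask = PIL.Image.open("mask.png").convert("L")  # placeholder path
# result = pipe(prompt="a red brick wall", image=init_image, mask=mask, strength=0.75).images[0]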
# A diffuser version implementation of Zero1to3 (https://github.com/cvlab-columbia/zero123), ICCV 2023 # by Xin Kong import inspect from typing import Any, Callable, Dict, List, Optional, Union import kornia import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection # from ...configuration_utils import FrozenDict # from ...models import AutoencoderKL, UNet2DConditionModel # from ...schedulers import KarrasDiffusionSchedulers # from ...utils import ( # deprecate, # is_accelerate_available, # is_accelerate_version, # logging, # randn_tensor, # replace_example_docstring, # ) # from ..pipeline_utils import DiffusionPipeline # from . import StableDiffusionPipelineOutput # from .safety_checker import StableDiffusionSafetyChecker from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel from diffusers.configuration_utils import ConfigMixin, FrozenDict from diffusers.models.modeling_utils import ModelMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, is_accelerate_available, is_accelerate_version, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name # todo EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt).images[0] ``` """ class CCProjection(ModelMixin, ConfigMixin): def __init__(self, in_channel=772, out_channel=768): super().__init__() self.in_channel = in_channel self.out_channel = out_channel self.projection = torch.nn.Linear(in_channel, out_channel) def forward(self, x): return self.projection(x) class Zero1to3StableDiffusionPipeline(DiffusionPipeline): r""" Pipeline for single view conditioned novel view generation using Zero1to3. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. image_encoder ([`CLIPVisionModelWithProjection`]): Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. 
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
        cc_projection ([`CCProjection`]):
            Projection layer to project the concatenated CLIP features and pose embeddings to the original CLIP
            feature size.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        image_encoder: CLIPVisionModelWithProjection,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        cc_projection: CCProjection,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, cc_projection=cc_projection, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.register_to_config(requires_safety_checker=requires_safety_checker) # self.model_mode = None def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow the processing of larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. 
""" if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"): from accelerate import cpu_offload else: raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: cpu_offload(cpu_offloaded_model, device) if self.safety_checker is not None: cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_offload` requires `accelerate v0.17.0` or higher.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) hook = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) if self.safety_checker is not None: _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) # We'll offload the last model manually. self.final_offload_hook = hook @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. """ if not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def CLIP_preprocess(self, x): dtype = x.dtype # following openai's implementation # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741 # follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608 if isinstance(x, torch.Tensor): if x.min() < -1.0 or x.max() > 1.0: raise ValueError("Expected input tensor to have values in the range [-1, 1]") x = kornia.geometry.resize( x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False ).to(dtype=dtype) x = (x + 1.0) / 2.0 # renormalize according to clip x = kornia.enhance.normalize( x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711]) ) return x # from image_variation def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) assert image.ndim == 4, "Image must have 4 dimensions" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 image = image.to(device=device, dtype=dtype) image = self.CLIP_preprocess(image) # if not isinstance(image, torch.Tensor): # # 0-255 # print("Warning: image is processed by hf's preprocess, which is 
different from openai original's.") # image = self.feature_extractor(images=image, return_tensors="pt").pixel_values image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype) image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings def _encode_pose(self, pose, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.cc_projection.parameters()).dtype if isinstance(pose, torch.Tensor): pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype) else: if isinstance(pose[0], list): pose = torch.Tensor(pose) else: pose = torch.Tensor([pose]) x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1) pose_embeddings = ( torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1) .unsqueeze(1) .to(device=device, dtype=dtype) ) # B, 1, 4 # duplicate pose embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = pose_embeddings.shape pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1) pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(pose_embeddings) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings]) return pose_embeddings def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_classifier_free_guidance): img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False) pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False) prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1) prompt_embeds = self.cc_projection(prompt_embeds) # prompt_embeds = img_prompt_embeds # follow 0123, add negative prompt, after projection if do_classifier_free_guidance: negative_prompt = torch.zeros_like(prompt_embeds) prompt_embeds = torch.cat([negative_prompt, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, image, height, width, callback_steps): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_img_latents(self, image, batch_size, dtype, device, generator=None, do_classifier_free_guidance=False): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) assert image.ndim == 4, "Image must have 4 dimensions" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 image = image.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.mode() # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor if batch_size > init_latents.shape[0]: # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1) num_images_per_prompt = batch_size // init_latents.shape[0] # duplicate image latents for each generation per prompt, using mps friendly method bs_embed, emb_c, emb_h, emb_w = init_latents.shape init_latents = init_latents.unsqueeze(1) init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1) init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w) # init_latents = torch.cat([init_latents]*2) if do_classifier_free_guidance else init_latents # follow zero123 init_latents = ( torch.cat([torch.zeros_like(init_latents), init_latents]) if do_classifier_free_guidance else init_latents ) init_latents = init_latents.to(device=device, dtype=dtype) return init_latents # def load_cc_projection(self, pretrained_weights=None): # self.cc_projection = torch.nn.Linear(772, 768) # torch.nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) # torch.nn.init.zeros_(list(self.cc_projection.parameters())[1]) # if pretrained_weights is not None: # self.cc_projection.load_state_dict(pretrained_weights) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, input_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None, prompt_imgs: Union[torch.FloatTensor, 
PIL.Image.Image] = None,
        poses: Union[List[float], List[List[float]]] = None,
        torch_dtype=torch.float32,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 3.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: float = 1.0,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            input_imgs (`PIL` or `List[PIL]`, *optional*):
                The single input image for each 3D object.
            prompt_imgs (`PIL` or `List[PIL]`, *optional*):
                Same as input_imgs, but used later as an image prompt condition, encoded by the CLIP feature.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 3.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct # input_image = hint_imgs self.check_inputs(input_imgs, height, width, callback_steps) # 2. Define call parameters if isinstance(input_imgs, PIL.Image.Image): batch_size = 1 elif isinstance(input_imgs, list): batch_size = len(input_imgs) else: batch_size = input_imgs.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input image with pose as prompt prompt_embeds = self._encode_image_with_pose( prompt_imgs, poses, device, num_images_per_prompt, do_classifier_free_guidance ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables latents = self.prepare_latents( batch_size * num_images_per_prompt, 4, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare image latents img_latents = self.prepare_img_latents( input_imgs, batch_size * num_images_per_prompt, prompt_embeds.dtype, device, generator, do_classifier_free_guidance, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, img_latents], dim=1) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 # latents = self.scheduler.step(noise_pred.to(dtype=torch.float32), t, latents.to(dtype=torch.float32)).prev_sample.to(prompt_embeds.dtype) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post-processing has_nsfw_concept = None if output_type == "latent": image = latents elif output_type == "pil": # 8. Post-processing image = self.decode_latents(latents) # 10. Convert to PIL image = self.numpy_to_pil(image) else: # 8. Post-processing image = self.decode_latents(latents) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
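# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original pipeline file): a minimal,
# self-contained illustration of the pose encoding performed by `_encode_pose`
# above. A camera pose given in degrees as (polar angle x, azimuth y, radius z)
# is mapped to the 4-dim vector [deg2rad(x), sin(deg2rad(y)), cos(deg2rad(y)), z],
# which `cc_projection` later concatenates with the CLIP image embedding. The
# input values below are arbitrary example numbers.
#
#   import torch
#
#   pose = torch.tensor([[30.0, 90.0, 1.5]])  # one (x, y, z) pose, in degrees
#   x, y, z = pose[:, 0:1], pose[:, 1:2], pose[:, 2:3]
#   pose_embedding = torch.cat(
#       [torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z],
#       dim=-1,
#   )
#   # pose_embedding has shape (1, 4) and is approximately [[0.5236, 1.0, 0.0, 1.5]]
# ---------------------------------------------------------------------------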
diffusers/examples/community/pipeline_zero1to3.py/0
{ "file_path": "diffusers/examples/community/pipeline_zero1to3.py", "repo_id": "diffusers", "token_count": 19739 }
103
from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.configuration_utils import FrozenDict from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class StableDiffusionMegaPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionMegaSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.register_to_config(requires_safety_checker=requires_safety_checker) @property def components(self) -> Dict[str, Any]: return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")} def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): r""" Enable sliced attention computation. When this option is enabled, the attention module will split the input tensor in slices, to compute attention in several steps. This is useful to save some memory in exchange for a small speed decrease. Args: slice_size (`str` or `int`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(slice_size) def disable_attention_slicing(self): r""" Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go back to computing attention in one step. 
""" # set slice_size = `None` to disable `attention slicing` self.enable_attention_slicing(None) @torch.no_grad() def inpaint( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], mask_image: Union[torch.FloatTensor, PIL.Image.Image], strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ): # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline return StableDiffusionInpaintPipelineLegacy(**self.components)( prompt=prompt, image=image, mask_image=mask_image, strength=strength, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, output_type=output_type, return_dict=return_dict, callback=callback, ) @torch.no_grad() def img2img( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ): # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline return StableDiffusionImg2ImgPipeline(**self.components)( prompt=prompt, image=image, strength=strength, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, ) @torch.no_grad() def text2img( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ): # For more information on how this function https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline return StableDiffusionPipeline(**self.components)( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
diffusers/examples/community/stable_diffusion_mega.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_mega.py", "repo_id": "diffusers", "token_count": 4380 }
104
# Custom Diffusion training example

[Custom Diffusion](https://arxiv.org/abs/2212.04488) is a method to customize text-to-image models like Stable Diffusion given just a few (4~5) images of a subject.
The `train_custom_diffusion.py` script shows how to implement the training procedure and adapt it for stable diffusion.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the example folder and run

```bash
pip install -r requirements.txt
pip install clip-retrieval
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, e.g., a notebook

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

### Cat example 😺

Now let's get our dataset. Download the dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it.

We also collect 200 real images using `clip-retrieval`, which are combined with the target images in the training dataset as a regularization. This prevents overfitting to the given target image. The following flags enable the regularization `with_prior_preservation`, `real_prior` with `prior_loss_weight=1.`.
The `class_prompt` should be the same category name as the target images. The collected real images have text captions similar to the `class_prompt`. The retrieved images are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization. To collect the real images, use this command first before training.

```bash
pip install clip-retrieval
python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
```

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="./data/cat"

accelerate launch train_custom_diffusion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --class_data_dir=./real_reg/samples_cat/ \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --class_prompt="cat" --num_class_images=200 \
  --instance_prompt="photo of a <new1> cat" \
  --resolution=512 \
  --train_batch_size=2 \
  --learning_rate=1e-5 \
  --lr_warmup_steps=0 \
  --max_train_steps=250 \
  --scale_lr --hflip \
  --modifier_token "<new1>"
```

**Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirement (16GB per GPU).
Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.**

To track your experiments using Weights and Biases (`wandb`) and to save intermediate results (which we HIGHLY recommend), follow these steps:

* Install `wandb`: `pip install wandb`.
* Authorize: `wandb login`.
* Then specify a `validation_prompt` and set `report_to` to `wandb` while launching training. You can also configure the following related arguments:
    * `num_validation_images`
    * `validation_steps`

Here is an example command:

```bash
accelerate launch train_custom_diffusion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --class_data_dir=./real_reg/samples_cat/ \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --class_prompt="cat" --num_class_images=200 \
  --instance_prompt="photo of a <new1> cat" \
  --resolution=512 \
  --train_batch_size=2 \
  --learning_rate=1e-5 \
  --lr_warmup_steps=0 \
  --max_train_steps=250 \
  --scale_lr --hflip \
  --modifier_token "<new1>" \
  --validation_prompt="<new1> cat sitting in a bucket" \
  --report_to="wandb"
```

Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details.

If you specify `--push_to_hub`, the learned parameters will be pushed to a repository on the Hugging Face Hub. Here is an [example repository](https://huggingface.co/sayakpaul/custom-diffusion-cat).

### Training on multiple concepts 🐱🪵

Provide a [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with the info about each concept, similar to [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py).

To collect the real images, run this command for each concept in the json file.

```bash
pip install clip-retrieval
python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
```

And then we're ready to start training!

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"

accelerate launch train_custom_diffusion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --output_dir=$OUTPUT_DIR \
  --concepts_list=./concept_list.json \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --resolution=512 \
  --train_batch_size=2 \
  --learning_rate=1e-5 \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --num_class_images=200 \
  --scale_lr --hflip \
  --modifier_token "<new1>+<new2>"
```

Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details.

### Training on human faces

For fine-tuning on human faces, we found the following configuration to work better: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, and `freeze_model=crossattn` with at least 15-20 images.

To collect the real images, use this command first before training.

```bash
pip install clip-retrieval
python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200
```

Then start training!
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="path-to-images"

accelerate launch train_custom_diffusion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --class_data_dir=./real_reg/samples_person/ \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --class_prompt="person" --num_class_images=200 \
  --instance_prompt="photo of a <new1> person" \
  --resolution=512 \
  --train_batch_size=2 \
  --learning_rate=5e-6 \
  --lr_warmup_steps=0 \
  --max_train_steps=1000 \
  --scale_lr --hflip --noaug \
  --freeze_model crossattn \
  --modifier_token "<new1>" \
  --enable_xformers_memory_efficient_attention
```

## Inference

Once you have trained a model using the above command, you can run inference using the command below. Make sure to include the `modifier token` (e.g. \<new1\> in the above example) in your prompt.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
pipe.unet.load_attn_procs(
    "path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin"
)
pipe.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")

image = pipe(
    "<new1> cat sitting in a bucket",
    num_inference_steps=100,
    guidance_scale=6.0,
    eta=1.0,
).images[0]
image.save("cat.png")
```

It's possible to directly load these parameters from a Hub repository:

```python
import torch
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline

model_id = "sayakpaul/custom-diffusion-cat"
card = RepoCard.load(model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(
    "cuda"
)
pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")

image = pipe(
    "<new1> cat sitting in a bucket",
    num_inference_steps=100,
    guidance_scale=6.0,
    eta=1.0,
).images[0]
image.save("cat.png")
```

Here is an example of performing inference with multiple concepts:

```python
import torch
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline

model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
card = RepoCard.load(model_id)
base_model_id = card.data.to_dict()["base_model"]

pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(
    "cuda"
)
pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
pipe.load_textual_inversion(model_id, weight_name="<new2>.bin")

image = pipe(
    "the <new1> cat sculpture in the style of a <new2> wooden pot",
    num_inference_steps=100,
    guidance_scale=6.0,
    eta=1.0,
).images[0]
image.save("multi-subject.png")
```

Here, `cat` and `wooden pot` refer to the multiple concepts.

### Inference from a training checkpoint

You can also perform inference from one of the complete checkpoints saved during the training process, if you used the `--checkpointing_steps` argument.

TODO.

## Set grads to none

To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument.
More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html

## Experimental results

You can refer to [our webpage](https://www.cs.cmu.edu/~custom-diffusion/) that discusses our experiments in detail. We also released a more extensive dataset of 101 concepts for evaluating model customization methods. For more details, please refer to our [dataset webpage](https://www.cs.cmu.edu/~custom-diffusion/dataset.html).
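As a footnote to the `--set_grads_to_none` section above: the flag corresponds to PyTorch's `Optimizer.zero_grad(set_to_none=True)` (see the linked docs). Here is a minimal, stand-alone illustration of the behavior it toggles, written in plain PyTorch rather than taken from the training script:

```python
import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters())

model(torch.randn(2, 4)).sum().backward()

# With set_to_none=True the gradient tensors are freed rather than zero-filled,
# which is what saves memory (and slightly changes behavior for any code that
# inspects `.grad` directly).
optimizer.zero_grad(set_to_none=True)
print(next(model.parameters()).grad)  # prints: None
```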
diffusers/examples/custom_diffusion/README.md/0
{ "file_path": "diffusers/examples/custom_diffusion/README.md", "repo_id": "diffusers", "token_count": 3550 }
105
# Inference Examples

**The inference examples folder is deprecated and will be removed in a future version**.
**Officially supported inference examples can be found in the [Pipelines folder](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines)**.

- For `Image-to-Image text-guided generation with Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
- For `In-painting using Stable Diffusion`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
- For `Tweak prompts reusing seeds and latents`, please have a look at the official [Pipeline examples](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines#examples)
diffusers/examples/inference/README.md/0
{ "file_path": "diffusers/examples/inference/README.md", "repo_id": "diffusers", "token_count": 252 }
106
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
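# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the `rollout` list built
# above holds one observation per step; if you want to keep it for rendering
# or later analysis, it can be stacked and written out, e.g.:
#
#   import numpy as np
#
#   np.save("hopper_rollout.npy", np.stack(rollout))  # shape (steps + 1, obs_dim)
# ---------------------------------------------------------------------------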
diffusers/examples/reinforcement_learning/run_diffuser_locomotion.py/0
{ "file_path": "diffusers/examples/reinforcement_learning/run_diffuser_locomotion.py", "repo_id": "diffusers", "token_count": 724 }
107
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ControlNetXSModel, UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor2_0, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.import_utils import is_invisible_watermark_available from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker logger = logging.get_logger(__name__) # pylint: disable=invalid-name class StableDiffusionXLControlNetXSPipeline( DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin ): r""" Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet-XS guidance. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): Second frozen text-encoder ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. tokenizer_2 ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. 
unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. controlnet ([`ControlNetXSModel`]: Provides additional conditioning to the `unet` during the denoising process. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings should always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no watermarker is used. """ # leave controlnet out on purpose because it iterates with unet model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae->controlnet" _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: ControlNetXSModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() vae_compatible, cnxs_condition_downsample_factor, vae_downsample_factor = controlnet._check_if_vae_compatible( vae ) if not vae_compatible: raise ValueError( f"The downsampling factors of the VAE ({vae_downsample_factor}) and the conditioning part of ControlNetXS model {cnxs_condition_downsample_factor} need to be equal. Consider building the ControlNetXS model with different `conditioning_block_sizes`." ) self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. 
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder, lora_scale)

            if self.text_encoder_2 is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder_2, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )

                prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)

                # We are only ALWAYS interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                if clip_skip is None:
                    prompt_embeds = prompt_embeds.hidden_states[-2]
                else:
                    # "2" because SDXL always indexes from the penultimate layer.
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) 
if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, image, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetXSModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetXSModel) ): self.check_image(image, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetXSModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetXSModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") else: assert False start, end = control_guidance_start, control_guidance_end if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) def prepare_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stages where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
""" if not hasattr(self, "unet"): raise ValueError("The pipeline must have `unet` for using FreeU.") self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu def disable_freeu(self): """Disables the FreeU mechanism if enabled.""" self.unet.disable_freeu() @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, control_guidance_start: float = 0.0, control_guidance_end: float = 1.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders. image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. 
Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled text embeddings are generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original `unet`.
            control_guidance_start (`float`, *optional*, defaults to 0.0):
                The percentage of total steps at which the ControlNet starts applying.
            control_guidance_end (`float`, *optional*, defaults to 1.0):
                The percentage of total steps at which the ControlNet stops applying.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image. If
                not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be the
                same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2
                of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings.
A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] is returned, otherwise a `tuple` is returned containing the output images. """ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, image, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt, prompt_2, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # 4. Prepare image if isinstance(controlnet, ControlNetXSModel): image = self.prepare_image( image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) height, width = image.shape[-2:] else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Prepare added time ids & embeddings if isinstance(image, list): original_size = original_size or image[0].shape[-2:] else: original_size = original_size or image.shape[-2:] target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} # predict the noise residual dont_control = ( i / len(timesteps) < control_guidance_start or (i + 1) / len(timesteps) > control_guidance_end ) if dont_control: noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=True, ).sample else: noise_pred = self.controlnet( base_model=self.unet, sample=latent_model_input, timestep=t, encoder_hidden_states=prompt_embeds, controlnet_cond=image, conditioning_scale=controlnet_conditioning_scale, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=True, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 
0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
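

# --- Illustrative addition (not part of the original pipeline) ---------------
# A minimal, self-contained sketch of how `control_guidance_start` and
# `control_guidance_end` gate the ControlNet-XS branch in the denoising loop
# above. The helper below mirrors the `dont_control` condition; its name is
# hypothetical and exists only for illustration.
def _control_is_active(step: int, num_steps: int, start: float = 0.0, end: float = 1.0) -> bool:
    """Return True when the ControlNet branch should run at `step` (0-indexed)."""
    dont_control = step / num_steps < start or (step + 1) / num_steps > end
    return not dont_control


# Example: with 10 steps and control limited to the middle half of sampling,
# only steps 3..6 use the ControlNet branch:
# [_control_is_active(i, 10, 0.25, 0.75) for i in range(10)]
# -> [False, False, False, True, True, True, True, False, False, False]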
diffusers/examples/research_projects/controlnetxs/pipeline_controlnet_xs_sd_xl.py/0
{ "file_path": "diffusers/examples/research_projects/controlnetxs/pipeline_controlnet_xs_sd_xl.py", "repo_id": "diffusers", "token_count": 24961 }
108
# Distillation for quantization on Textual Inversion models to personalize text2image

[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images. _By using just 3-5 images, new concepts can be taught to Stable Diffusion and the model personalized on your own images._
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.
We have enabled distillation for quantization in `textual_inversion.py` to do quantization aware training as well as distillation on the model generated by the Textual Inversion method.

## Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -r requirements.txt
```

## Prepare Datasets

One picture from the Hugging Face dataset [sd-concepts-library/dicoo2](https://huggingface.co/sd-concepts-library/dicoo2) is needed; save it to the `./dicoo` directory. The picture is shown below:

<a href="https://huggingface.co/sd-concepts-library/dicoo2/blob/main/concept_images/1.jpeg">
    <img src="https://huggingface.co/sd-concepts-library/dicoo2/resolve/main/concept_images/1.jpeg" width = "300" height="300">
</a>

## Get an FP32 Textual Inversion model

Use the following command to fine-tune the Stable Diffusion model on the above dataset to obtain the FP32 Textual Inversion model.

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATA_DIR="./dicoo"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="dicoo_model"
```

## Do distillation for quantization

Distillation for quantization is a method that combines [intermediate layer knowledge distillation](https://github.com/intel/neural-compressor/blob/master/docs/source/distillation.md#intermediate-layer-knowledge-distillation) and [quantization aware training](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#quantization-aware-training) in the same training process to improve the performance of the quantized model. Given an FP32 model, the distillation for quantization approach takes the model itself as the teacher and transfers the knowledge of the specified layers to the student model, i.e., the quantized version of the FP32 model, during the quantization aware training process.

Once you have the FP32 Textual Inversion model, the following command will take the FP32 Textual Inversion model as input to do distillation for quantization and generate the INT8 Textual Inversion model.
```bash
export FP32_MODEL_NAME="./dicoo_model"
export DATA_DIR="./dicoo"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$FP32_MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --use_ema --learnable_property="object" \
  --placeholder_token="<dicoo>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=300 \
  --learning_rate=5.0e-04 --max_grad_norm=3 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="int8_model" \
  --do_quantization --do_distillation --verify_loading
```

After the distillation for quantization process, the quantized UNet is about 4 times smaller (3279MB -> 827MB).

## Inference

Once you have trained an INT8 model with the above command, inference can be done simply using the `text2images.py` script. Make sure to include the `placeholder_token` in your prompt.

```bash
export INT8_MODEL_NAME="./int8_model"

python text2images.py \
  --pretrained_model_name_or_path=$INT8_MODEL_NAME \
  --caption "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings." \
  --images_num 4
```

Here is a comparison of images generated by the FP32 model (left) and the INT8 model (right):

<p float="left">
    <img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/FP32.png" width = "300" height = "300" alt="FP32" align=center />
    <img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/INT8.png" width = "300" height = "300" alt="INT8" align=center />
</p>
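
If you prefer to run inference from Python rather than through `text2images.py`, a minimal sketch along these lines should work, assuming the exported `int8_model` directory keeps the standard diffusers layout (the quantized UNet may instead require the loading logic inside `text2images.py`, so treat this as a starting point only):

```python
from diffusers import StableDiffusionPipeline

# Hypothetical direct load of the trained model directory; the placeholder
# token must appear in the prompt for the learned concept to be used.
pipe = StableDiffusionPipeline.from_pretrained("./int8_model")
image = pipe("a photo of a <dicoo> toy on the beach").images[0]
image.save("dicoo.png")
```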
diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md", "repo_id": "diffusers", "token_count": 1443 }
109
# RealFill

[RealFill](https://arxiv.org/abs/2309.16668) is a method to personalize text2image inpainting models like stable diffusion inpainting given just a few (1-5) images of a scene.
The `train_realfill.py` script shows how to implement the training procedure for stable diffusion inpainting.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

cd to the realfill folder and run

```bash
cd realfill
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell e.g. a notebook

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

When running `accelerate config`, setting torch compile mode to True can give dramatic speedups.

### Toy example

Now let's fill the real. For this example, we will use some images of the flower girl example from the paper.

We already provide some images for testing in [this link](https://github.com/thuanz123/realfill/tree/main/data/flowerwoman)

You only have to launch the training using:

```bash
export MODEL_NAME="stabilityai/stable-diffusion-2-inpainting"
export TRAIN_DIR="data/flowerwoman"
export OUTPUT_DIR="flowerwoman-model"

accelerate launch train_realfill.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$TRAIN_DIR \
  --output_dir=$OUTPUT_DIR \
  --resolution=512 \
  --train_batch_size=16 \
  --gradient_accumulation_steps=1 \
  --unet_learning_rate=2e-4 \
  --text_encoder_learning_rate=4e-5 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=100 \
  --max_train_steps=2000 \
  --lora_rank=8 \
  --lora_dropout=0.1 \
  --lora_alpha=16
```

### Training on a low-memory GPU:

It is possible to run RealFill on a low-memory GPU by using the following optimizations:
- [gradient checkpointing and the 8-bit optimizer](#training-with-gradient-checkpointing-and-8-bit-optimizers)
- [xformers](#training-with-xformers)
- [setting grads to none](#set-grads-to-none)

```bash
export MODEL_NAME="stabilityai/stable-diffusion-2-inpainting"
export TRAIN_DIR="data/flowerwoman"
export OUTPUT_DIR="flowerwoman-model"

accelerate launch train_realfill.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$TRAIN_DIR \
  --output_dir=$OUTPUT_DIR \
  --resolution=512 \
  --train_batch_size=16 \
  --gradient_accumulation_steps=1 --gradient_checkpointing \
  --use_8bit_adam \
  --enable_xformers_memory_efficient_attention \
  --set_grads_to_none \
  --unet_learning_rate=2e-4 \
  --text_encoder_learning_rate=4e-5 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=100 \
  --max_train_steps=2000 \
  --lora_rank=8 \
  --lora_dropout=0.1 \
  --lora_alpha=16
```

### Training with gradient checkpointing and 8-bit optimizers:

With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to train RealFill on a 16GB GPU.

To install `bitsandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).

### Training with xformers:

You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script.
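
The same xFormers saving also applies at inference time via the pipeline-level switch. As a sketch (assuming xFormers is installed and that the training run saved a full inpainting pipeline to the output directory, which is an assumption about this script's export format):

```python
import torch
from diffusers import StableDiffusionInpaintPipeline

# Hypothetical inference-time use of memory-efficient attention on the
# trained model; substitute your actual output directory.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "flowerwoman-model", torch_dtype=torch.float16
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()
```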
### Set grads to none

To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument. A short illustrative snippet of what this flag toggles follows at the end of this README.

More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html

## Acknowledgements

This repo is built upon the code of DreamBooth from diffusers, and we thank the developers for their great work and efforts to release the source code. Furthermore, a special "thank you" to RealFill's authors for publishing such amazing work.
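
As referenced above, here is a minimal, standalone illustration of the PyTorch behavior that `--set_grads_to_none` toggles (this snippet is not part of the training script):

```python
import torch

param = torch.nn.Parameter(torch.randn(4))
param.grad = torch.ones_like(param)

optimizer = torch.optim.SGD([param], lr=0.1)

# set_to_none=True frees the gradient tensors instead of overwriting them with
# zeros; this saves memory, but `param.grad` is None until the next backward
# pass, so code that reads `.grad` unconditionally may break.
optimizer.zero_grad(set_to_none=True)
print(param.grad)  # None
```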
diffusers/examples/research_projects/realfill/README.md/0
{ "file_path": "diffusers/examples/research_projects/realfill/README.md", "repo_id": "diffusers", "token_count": 1321 }
110
import argparse import logging import math import os import random from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import PIL import torch import torch.utils.checkpoint import transformers from flax import jax_utils from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, FlaxPNDMScheduler, FlaxStableDiffusionPipeline, FlaxUNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.26.0.dev0") logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." ) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--use_auth_token", action="store_true", help=( "Will use the token generated when running `huggingface-cli login` (necessary to use this script with" " private models)." ), ) parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng): if model.config.vocab_size == new_num_tokens or new_num_tokens is None: return model.config.vocab_size = new_num_tokens params = model.params old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"] old_num_tokens, emb_dim = old_embeddings.shape initializer = jax.nn.initializers.normal() new_embeddings = initializer(rng, (new_num_tokens, emb_dim)) new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings) new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id]) params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings model.params = params return model def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = parse_args() if args.seed is not None: set_seed(args.seed) if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae, vae_params = FlaxAutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision ) unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) # Create sampling rng rng = jax.random.PRNGKey(args.seed) rng, _ = jax.random.split(rng) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder = resize_token_embeddings( text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng ) original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"] train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) input_ids = torch.stack([example["input_ids"] for example in examples]) batch = {"pixel_values": pixel_values, "input_ids": input_ids} batch = {k: v.numpy() for k, v in batch.items()} return batch total_train_batch_size = args.train_batch_size * jax.local_device_count() train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn ) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) optimizer = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) def create_mask(params, label_fn): def _map(params, mask, label_fn): for k in params: if label_fn(k): mask[k] = "token_embedding" else: if isinstance(params[k], dict): mask[k] = {} _map(params[k], mask[k], label_fn) else: mask[k] = "zero" mask = {} _map(params, mask, label_fn) return mask def zero_grads(): # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491 def init_fn(_): return () def update_fn(updates, state, params=None): return jax.tree_util.tree_map(jnp.zeros_like, updates), () return optax.GradientTransformation(init_fn, update_fn) # Zero out gradients of layers other than the token embedding layer tx = optax.multi_transform( {"token_embedding": optimizer, "zero": zero_grads()}, create_mask(text_encoder.params, lambda s: s == "token_embedding"), ) state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx) noise_scheduler = FlaxDDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 ) noise_scheduler_state = noise_scheduler.create_state() # Initialize our training train_rngs = jax.random.split(rng, 
jax.local_device_count()) # Define gradient train step fn def train_step(state, vae_params, unet_params, batch, train_rng): dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) def compute_loss(params): vae_outputs = vae.apply( {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) encoder_hidden_states = state.apply_fn( batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True )[0] # Predict the noise residual and compute loss model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) # Keep the token embeddings fixed except the newly added embeddings for the concept, # as we only want to optimize the concept embeddings token_embeds = original_token_embeds.at[placeholder_token_id].set( new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id] ) new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics, new_train_rng # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) vae_params = jax_utils.replicate(vae_params) unet_params = jax_utils.replicate(unet_params) # Train! num_update_steps_per_epoch = math.ceil(len(train_dataloader)) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... 
(1/{args.num_train_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_metrics = [] steps_per_epoch = len(train_dataset) // total_train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_dataloader: batch = shard(batch) state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs) train_metrics.append(train_metric) train_step_progress_bar.update(1) global_step += 1 if global_step >= args.max_train_steps: break if global_step % args.save_steps == 0: learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][ "embedding" ][placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save( os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict ) train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") # Create the pipeline using the trained modules and save it. if jax.process_index() == 0: scheduler = FlaxPNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", from_pt=True ) pipeline = FlaxStableDiffusionPipeline( text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) pipeline.save_pretrained( args.output_dir, params={ "text_encoder": get_params_to_save(state.params), "vae": get_params_to_save(vae_params), "unet": get_params_to_save(unet_params), "safety_checker": safety_checker.params, }, ) # Also save the newly trained embeddings learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][ placeholder_token_id ] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if __name__ == "__main__": main()
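# Usage sketch (not part of the training script above): the loop saves the learned
# concept embedding via jnp.save as a pickled dict in `learned_embeds.npy`. A minimal
# way to read it back, assuming the script was run with --output_dir
# ./textual_inversion_out and --placeholder_token "<my-concept>" (both hypothetical
# values; substitute your own):
import numpy as np

learned = np.load("./textual_inversion_out/learned_embeds.npy", allow_pickle=True).item()
embedding = learned["<my-concept>"]
print(embedding.shape)  # (hidden_dim,), e.g. (768,) for the CLIP ViT-L/14 text encoder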
# ---- source: diffusers/examples/textual_inversion/textual_inversion_flax.py ----
import inspect import os from argparse import ArgumentParser import numpy as np import torch from muse import MaskGiTUViT, VQGANModel from muse import PipelineMuse as OldPipelineMuse from transformers import CLIPTextModelWithProjection, CLIPTokenizer from diffusers import VQModel from diffusers.models.attention_processor import AttnProcessor from diffusers.models.unets.uvit_2d import UVit2DModel from diffusers.pipelines.amused.pipeline_amused import AmusedPipeline from diffusers.schedulers import AmusedScheduler torch.backends.cuda.enable_flash_sdp(False) torch.backends.cuda.enable_mem_efficient_sdp(False) torch.backends.cuda.enable_math_sdp(True) os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.use_deterministic_algorithms(True) # Enable CUDNN deterministic mode torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch.backends.cuda.matmul.allow_tf32 = False device = "cuda" def main(): args = ArgumentParser() args.add_argument("--model_256", action="store_true") args.add_argument("--write_to", type=str, required=False, default=None) args.add_argument("--transformer_path", type=str, required=False, default=None) args = args.parse_args() transformer_path = args.transformer_path subfolder = "transformer" if transformer_path is None: if args.model_256: transformer_path = "openMUSE/muse-256" else: transformer_path = ( "../research-run-512-checkpoints/research-run-512-with-downsample-checkpoint-554000/unwrapped_model/" ) subfolder = None old_transformer = MaskGiTUViT.from_pretrained(transformer_path, subfolder=subfolder) old_transformer.to(device) old_vae = VQGANModel.from_pretrained("openMUSE/muse-512", subfolder="vae") old_vae.to(device) vqvae = make_vqvae(old_vae) tokenizer = CLIPTokenizer.from_pretrained("openMUSE/muse-512", subfolder="text_encoder") text_encoder = CLIPTextModelWithProjection.from_pretrained("openMUSE/muse-512", subfolder="text_encoder") text_encoder.to(device) transformer = make_transformer(old_transformer, args.model_256) scheduler = AmusedScheduler(mask_token_id=old_transformer.config.mask_token_id) new_pipe = AmusedPipeline( vqvae=vqvae, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, scheduler=scheduler ) old_pipe = OldPipelineMuse( vae=old_vae, transformer=old_transformer, text_encoder=text_encoder, tokenizer=tokenizer ) old_pipe.to(device) if args.model_256: transformer_seq_len = 256 orig_size = (256, 256) else: transformer_seq_len = 1024 orig_size = (512, 512) old_out = old_pipe( "dog", generator=torch.Generator(device).manual_seed(0), transformer_seq_len=transformer_seq_len, orig_size=orig_size, timesteps=12, )[0] new_out = new_pipe("dog", generator=torch.Generator(device).manual_seed(0)).images[0] old_out = np.array(old_out) new_out = np.array(new_out) diff = np.abs(old_out.astype(np.float64) - new_out.astype(np.float64)) # assert diff diff.sum() == 0 print("skipping pipeline full equivalence check") print(f"max diff: {diff.max()}, diff.sum() / diff.size {diff.sum() / diff.size}") if args.model_256: assert diff.max() <= 3 assert diff.sum() / diff.size < 0.7 else: assert diff.max() <= 1 assert diff.sum() / diff.size < 0.4 if args.write_to is not None: new_pipe.save_pretrained(args.write_to) def make_transformer(old_transformer, model_256): args = dict(old_transformer.config) force_down_up_sample = args["force_down_up_sample"] signature = inspect.signature(UVit2DModel.__init__) args_ = { "downsample": force_down_up_sample, "upsample": force_down_up_sample, 
"block_out_channels": args["block_out_channels"][0], "sample_size": 16 if model_256 else 32, } for s in list(signature.parameters.keys()): if s in ["self", "downsample", "upsample", "sample_size", "block_out_channels"]: continue args_[s] = args[s] new_transformer = UVit2DModel(**args_) new_transformer.to(device) new_transformer.set_attn_processor(AttnProcessor()) state_dict = old_transformer.state_dict() state_dict["cond_embed.linear_1.weight"] = state_dict.pop("cond_embed.0.weight") state_dict["cond_embed.linear_2.weight"] = state_dict.pop("cond_embed.2.weight") for i in range(22): state_dict[f"transformer_layers.{i}.norm1.norm.weight"] = state_dict.pop( f"transformer_layers.{i}.attn_layer_norm.weight" ) state_dict[f"transformer_layers.{i}.norm1.linear.weight"] = state_dict.pop( f"transformer_layers.{i}.self_attn_adaLN_modulation.mapper.weight" ) state_dict[f"transformer_layers.{i}.attn1.to_q.weight"] = state_dict.pop( f"transformer_layers.{i}.attention.query.weight" ) state_dict[f"transformer_layers.{i}.attn1.to_k.weight"] = state_dict.pop( f"transformer_layers.{i}.attention.key.weight" ) state_dict[f"transformer_layers.{i}.attn1.to_v.weight"] = state_dict.pop( f"transformer_layers.{i}.attention.value.weight" ) state_dict[f"transformer_layers.{i}.attn1.to_out.0.weight"] = state_dict.pop( f"transformer_layers.{i}.attention.out.weight" ) state_dict[f"transformer_layers.{i}.norm2.norm.weight"] = state_dict.pop( f"transformer_layers.{i}.crossattn_layer_norm.weight" ) state_dict[f"transformer_layers.{i}.norm2.linear.weight"] = state_dict.pop( f"transformer_layers.{i}.cross_attn_adaLN_modulation.mapper.weight" ) state_dict[f"transformer_layers.{i}.attn2.to_q.weight"] = state_dict.pop( f"transformer_layers.{i}.crossattention.query.weight" ) state_dict[f"transformer_layers.{i}.attn2.to_k.weight"] = state_dict.pop( f"transformer_layers.{i}.crossattention.key.weight" ) state_dict[f"transformer_layers.{i}.attn2.to_v.weight"] = state_dict.pop( f"transformer_layers.{i}.crossattention.value.weight" ) state_dict[f"transformer_layers.{i}.attn2.to_out.0.weight"] = state_dict.pop( f"transformer_layers.{i}.crossattention.out.weight" ) state_dict[f"transformer_layers.{i}.norm3.norm.weight"] = state_dict.pop( f"transformer_layers.{i}.ffn.pre_mlp_layer_norm.weight" ) state_dict[f"transformer_layers.{i}.norm3.linear.weight"] = state_dict.pop( f"transformer_layers.{i}.ffn.adaLN_modulation.mapper.weight" ) wi_0_weight = state_dict.pop(f"transformer_layers.{i}.ffn.wi_0.weight") wi_1_weight = state_dict.pop(f"transformer_layers.{i}.ffn.wi_1.weight") proj_weight = torch.concat([wi_1_weight, wi_0_weight], dim=0) state_dict[f"transformer_layers.{i}.ff.net.0.proj.weight"] = proj_weight state_dict[f"transformer_layers.{i}.ff.net.2.weight"] = state_dict.pop(f"transformer_layers.{i}.ffn.wo.weight") if force_down_up_sample: state_dict["down_block.downsample.norm.weight"] = state_dict.pop("down_blocks.0.downsample.0.norm.weight") state_dict["down_block.downsample.conv.weight"] = state_dict.pop("down_blocks.0.downsample.1.weight") state_dict["up_block.upsample.norm.weight"] = state_dict.pop("up_blocks.0.upsample.0.norm.weight") state_dict["up_block.upsample.conv.weight"] = state_dict.pop("up_blocks.0.upsample.1.weight") state_dict["mlm_layer.layer_norm.weight"] = state_dict.pop("mlm_layer.layer_norm.norm.weight") for i in range(3): state_dict[f"down_block.res_blocks.{i}.norm.weight"] = state_dict.pop( f"down_blocks.0.res_blocks.{i}.norm.norm.weight" ) state_dict[f"down_block.res_blocks.{i}.channelwise_linear_1.weight"] = 
state_dict.pop( f"down_blocks.0.res_blocks.{i}.channelwise.0.weight" ) state_dict[f"down_block.res_blocks.{i}.channelwise_norm.gamma"] = state_dict.pop( f"down_blocks.0.res_blocks.{i}.channelwise.2.gamma" ) state_dict[f"down_block.res_blocks.{i}.channelwise_norm.beta"] = state_dict.pop( f"down_blocks.0.res_blocks.{i}.channelwise.2.beta" ) state_dict[f"down_block.res_blocks.{i}.channelwise_linear_2.weight"] = state_dict.pop( f"down_blocks.0.res_blocks.{i}.channelwise.4.weight" ) state_dict[f"down_block.res_blocks.{i}.cond_embeds_mapper.weight"] = state_dict.pop( f"down_blocks.0.res_blocks.{i}.adaLN_modulation.mapper.weight" ) state_dict[f"down_block.attention_blocks.{i}.norm1.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.attn_layer_norm.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn1.to_q.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.attention.query.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn1.to_k.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.attention.key.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn1.to_v.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.attention.value.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn1.to_out.0.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.attention.out.weight" ) state_dict[f"down_block.attention_blocks.{i}.norm2.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.crossattn_layer_norm.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn2.to_q.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.crossattention.query.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn2.to_k.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.crossattention.key.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn2.to_v.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.crossattention.value.weight" ) state_dict[f"down_block.attention_blocks.{i}.attn2.to_out.0.weight"] = state_dict.pop( f"down_blocks.0.attention_blocks.{i}.crossattention.out.weight" ) state_dict[f"up_block.res_blocks.{i}.norm.weight"] = state_dict.pop( f"up_blocks.0.res_blocks.{i}.norm.norm.weight" ) state_dict[f"up_block.res_blocks.{i}.channelwise_linear_1.weight"] = state_dict.pop( f"up_blocks.0.res_blocks.{i}.channelwise.0.weight" ) state_dict[f"up_block.res_blocks.{i}.channelwise_norm.gamma"] = state_dict.pop( f"up_blocks.0.res_blocks.{i}.channelwise.2.gamma" ) state_dict[f"up_block.res_blocks.{i}.channelwise_norm.beta"] = state_dict.pop( f"up_blocks.0.res_blocks.{i}.channelwise.2.beta" ) state_dict[f"up_block.res_blocks.{i}.channelwise_linear_2.weight"] = state_dict.pop( f"up_blocks.0.res_blocks.{i}.channelwise.4.weight" ) state_dict[f"up_block.res_blocks.{i}.cond_embeds_mapper.weight"] = state_dict.pop( f"up_blocks.0.res_blocks.{i}.adaLN_modulation.mapper.weight" ) state_dict[f"up_block.attention_blocks.{i}.norm1.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.attn_layer_norm.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn1.to_q.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.attention.query.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn1.to_k.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.attention.key.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn1.to_v.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.attention.value.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn1.to_out.0.weight"] = 
state_dict.pop( f"up_blocks.0.attention_blocks.{i}.attention.out.weight" ) state_dict[f"up_block.attention_blocks.{i}.norm2.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.crossattn_layer_norm.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn2.to_q.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.crossattention.query.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn2.to_k.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.crossattention.key.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn2.to_v.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.crossattention.value.weight" ) state_dict[f"up_block.attention_blocks.{i}.attn2.to_out.0.weight"] = state_dict.pop( f"up_blocks.0.attention_blocks.{i}.crossattention.out.weight" ) for key in list(state_dict.keys()): if key.startswith("up_blocks.0"): key_ = "up_block." + ".".join(key.split(".")[2:]) state_dict[key_] = state_dict.pop(key) if key.startswith("down_blocks.0"): key_ = "down_block." + ".".join(key.split(".")[2:]) state_dict[key_] = state_dict.pop(key) new_transformer.load_state_dict(state_dict) input_ids = torch.randint(0, 10, (1, 32, 32), device=old_transformer.device) encoder_hidden_states = torch.randn((1, 77, 768), device=old_transformer.device) cond_embeds = torch.randn((1, 768), device=old_transformer.device) micro_conds = torch.tensor([[512, 512, 0, 0, 6]], dtype=torch.float32, device=old_transformer.device) old_out = old_transformer(input_ids.reshape(1, -1), encoder_hidden_states, cond_embeds, micro_conds) old_out = old_out.reshape(1, 32, 32, 8192).permute(0, 3, 1, 2) new_out = new_transformer(input_ids, encoder_hidden_states, cond_embeds, micro_conds) # NOTE: these differences are solely due to using the geglu block that has a single linear layer of # double output dimension instead of two different linear layers max_diff = (old_out - new_out).abs().max() total_diff = (old_out - new_out).abs().sum() print(f"Transformer max_diff: {max_diff} total_diff: {total_diff}") assert max_diff < 0.01 assert total_diff < 1500 return new_transformer def make_vqvae(old_vae): new_vae = VQModel( act_fn="silu", block_out_channels=[128, 256, 256, 512, 768], down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], in_channels=3, latent_channels=64, layers_per_block=2, norm_num_groups=32, num_vq_embeddings=8192, out_channels=3, sample_size=32, up_block_types=[ "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", ], mid_block_add_attention=False, lookup_from_codebook=True, ) new_vae.to(device) # fmt: off new_state_dict = {} old_state_dict = old_vae.state_dict() new_state_dict["encoder.conv_in.weight"] = old_state_dict.pop("encoder.conv_in.weight") new_state_dict["encoder.conv_in.bias"] = old_state_dict.pop("encoder.conv_in.bias") convert_vae_block_state_dict(old_state_dict, "encoder.down.0", new_state_dict, "encoder.down_blocks.0") convert_vae_block_state_dict(old_state_dict, "encoder.down.1", new_state_dict, "encoder.down_blocks.1") convert_vae_block_state_dict(old_state_dict, "encoder.down.2", new_state_dict, "encoder.down_blocks.2") convert_vae_block_state_dict(old_state_dict, "encoder.down.3", new_state_dict, "encoder.down_blocks.3") convert_vae_block_state_dict(old_state_dict, "encoder.down.4", new_state_dict, "encoder.down_blocks.4") new_state_dict["encoder.mid_block.resnets.0.norm1.weight"] = old_state_dict.pop("encoder.mid.block_1.norm1.weight") 
new_state_dict["encoder.mid_block.resnets.0.norm1.bias"] = old_state_dict.pop("encoder.mid.block_1.norm1.bias") new_state_dict["encoder.mid_block.resnets.0.conv1.weight"] = old_state_dict.pop("encoder.mid.block_1.conv1.weight") new_state_dict["encoder.mid_block.resnets.0.conv1.bias"] = old_state_dict.pop("encoder.mid.block_1.conv1.bias") new_state_dict["encoder.mid_block.resnets.0.norm2.weight"] = old_state_dict.pop("encoder.mid.block_1.norm2.weight") new_state_dict["encoder.mid_block.resnets.0.norm2.bias"] = old_state_dict.pop("encoder.mid.block_1.norm2.bias") new_state_dict["encoder.mid_block.resnets.0.conv2.weight"] = old_state_dict.pop("encoder.mid.block_1.conv2.weight") new_state_dict["encoder.mid_block.resnets.0.conv2.bias"] = old_state_dict.pop("encoder.mid.block_1.conv2.bias") new_state_dict["encoder.mid_block.resnets.1.norm1.weight"] = old_state_dict.pop("encoder.mid.block_2.norm1.weight") new_state_dict["encoder.mid_block.resnets.1.norm1.bias"] = old_state_dict.pop("encoder.mid.block_2.norm1.bias") new_state_dict["encoder.mid_block.resnets.1.conv1.weight"] = old_state_dict.pop("encoder.mid.block_2.conv1.weight") new_state_dict["encoder.mid_block.resnets.1.conv1.bias"] = old_state_dict.pop("encoder.mid.block_2.conv1.bias") new_state_dict["encoder.mid_block.resnets.1.norm2.weight"] = old_state_dict.pop("encoder.mid.block_2.norm2.weight") new_state_dict["encoder.mid_block.resnets.1.norm2.bias"] = old_state_dict.pop("encoder.mid.block_2.norm2.bias") new_state_dict["encoder.mid_block.resnets.1.conv2.weight"] = old_state_dict.pop("encoder.mid.block_2.conv2.weight") new_state_dict["encoder.mid_block.resnets.1.conv2.bias"] = old_state_dict.pop("encoder.mid.block_2.conv2.bias") new_state_dict["encoder.conv_norm_out.weight"] = old_state_dict.pop("encoder.norm_out.weight") new_state_dict["encoder.conv_norm_out.bias"] = old_state_dict.pop("encoder.norm_out.bias") new_state_dict["encoder.conv_out.weight"] = old_state_dict.pop("encoder.conv_out.weight") new_state_dict["encoder.conv_out.bias"] = old_state_dict.pop("encoder.conv_out.bias") new_state_dict["quant_conv.weight"] = old_state_dict.pop("quant_conv.weight") new_state_dict["quant_conv.bias"] = old_state_dict.pop("quant_conv.bias") new_state_dict["quantize.embedding.weight"] = old_state_dict.pop("quantize.embedding.weight") new_state_dict["post_quant_conv.weight"] = old_state_dict.pop("post_quant_conv.weight") new_state_dict["post_quant_conv.bias"] = old_state_dict.pop("post_quant_conv.bias") new_state_dict["decoder.conv_in.weight"] = old_state_dict.pop("decoder.conv_in.weight") new_state_dict["decoder.conv_in.bias"] = old_state_dict.pop("decoder.conv_in.bias") new_state_dict["decoder.mid_block.resnets.0.norm1.weight"] = old_state_dict.pop("decoder.mid.block_1.norm1.weight") new_state_dict["decoder.mid_block.resnets.0.norm1.bias"] = old_state_dict.pop("decoder.mid.block_1.norm1.bias") new_state_dict["decoder.mid_block.resnets.0.conv1.weight"] = old_state_dict.pop("decoder.mid.block_1.conv1.weight") new_state_dict["decoder.mid_block.resnets.0.conv1.bias"] = old_state_dict.pop("decoder.mid.block_1.conv1.bias") new_state_dict["decoder.mid_block.resnets.0.norm2.weight"] = old_state_dict.pop("decoder.mid.block_1.norm2.weight") new_state_dict["decoder.mid_block.resnets.0.norm2.bias"] = old_state_dict.pop("decoder.mid.block_1.norm2.bias") new_state_dict["decoder.mid_block.resnets.0.conv2.weight"] = old_state_dict.pop("decoder.mid.block_1.conv2.weight") new_state_dict["decoder.mid_block.resnets.0.conv2.bias"] = 
old_state_dict.pop("decoder.mid.block_1.conv2.bias") new_state_dict["decoder.mid_block.resnets.1.norm1.weight"] = old_state_dict.pop("decoder.mid.block_2.norm1.weight") new_state_dict["decoder.mid_block.resnets.1.norm1.bias"] = old_state_dict.pop("decoder.mid.block_2.norm1.bias") new_state_dict["decoder.mid_block.resnets.1.conv1.weight"] = old_state_dict.pop("decoder.mid.block_2.conv1.weight") new_state_dict["decoder.mid_block.resnets.1.conv1.bias"] = old_state_dict.pop("decoder.mid.block_2.conv1.bias") new_state_dict["decoder.mid_block.resnets.1.norm2.weight"] = old_state_dict.pop("decoder.mid.block_2.norm2.weight") new_state_dict["decoder.mid_block.resnets.1.norm2.bias"] = old_state_dict.pop("decoder.mid.block_2.norm2.bias") new_state_dict["decoder.mid_block.resnets.1.conv2.weight"] = old_state_dict.pop("decoder.mid.block_2.conv2.weight") new_state_dict["decoder.mid_block.resnets.1.conv2.bias"] = old_state_dict.pop("decoder.mid.block_2.conv2.bias") convert_vae_block_state_dict(old_state_dict, "decoder.up.0", new_state_dict, "decoder.up_blocks.4") convert_vae_block_state_dict(old_state_dict, "decoder.up.1", new_state_dict, "decoder.up_blocks.3") convert_vae_block_state_dict(old_state_dict, "decoder.up.2", new_state_dict, "decoder.up_blocks.2") convert_vae_block_state_dict(old_state_dict, "decoder.up.3", new_state_dict, "decoder.up_blocks.1") convert_vae_block_state_dict(old_state_dict, "decoder.up.4", new_state_dict, "decoder.up_blocks.0") new_state_dict["decoder.conv_norm_out.weight"] = old_state_dict.pop("decoder.norm_out.weight") new_state_dict["decoder.conv_norm_out.bias"] = old_state_dict.pop("decoder.norm_out.bias") new_state_dict["decoder.conv_out.weight"] = old_state_dict.pop("decoder.conv_out.weight") new_state_dict["decoder.conv_out.bias"] = old_state_dict.pop("decoder.conv_out.bias") # fmt: on assert len(old_state_dict.keys()) == 0 new_vae.load_state_dict(new_state_dict) input = torch.randn((1, 3, 512, 512), device=device) input = input.clamp(-1, 1) old_encoder_output = old_vae.quant_conv(old_vae.encoder(input)) new_encoder_output = new_vae.quant_conv(new_vae.encoder(input)) assert (old_encoder_output == new_encoder_output).all() old_decoder_output = old_vae.decoder(old_vae.post_quant_conv(old_encoder_output)) new_decoder_output = new_vae.decoder(new_vae.post_quant_conv(new_encoder_output)) # assert (old_decoder_output == new_decoder_output).all() print("kipping vae decoder equivalence check") print(f"vae decoder diff {(old_decoder_output - new_decoder_output).float().abs().sum()}") old_output = old_vae(input)[0] new_output = new_vae(input)[0] # assert (old_output == new_output).all() print("skipping full vae equivalence check") print(f"vae full diff { (old_output - new_output).float().abs().sum()}") return new_vae def convert_vae_block_state_dict(old_state_dict, prefix_from, new_state_dict, prefix_to): # fmt: off new_state_dict[f"{prefix_to}.resnets.0.norm1.weight"] = old_state_dict.pop(f"{prefix_from}.block.0.norm1.weight") new_state_dict[f"{prefix_to}.resnets.0.norm1.bias"] = old_state_dict.pop(f"{prefix_from}.block.0.norm1.bias") new_state_dict[f"{prefix_to}.resnets.0.conv1.weight"] = old_state_dict.pop(f"{prefix_from}.block.0.conv1.weight") new_state_dict[f"{prefix_to}.resnets.0.conv1.bias"] = old_state_dict.pop(f"{prefix_from}.block.0.conv1.bias") new_state_dict[f"{prefix_to}.resnets.0.norm2.weight"] = old_state_dict.pop(f"{prefix_from}.block.0.norm2.weight") new_state_dict[f"{prefix_to}.resnets.0.norm2.bias"] = old_state_dict.pop(f"{prefix_from}.block.0.norm2.bias") 
new_state_dict[f"{prefix_to}.resnets.0.conv2.weight"] = old_state_dict.pop(f"{prefix_from}.block.0.conv2.weight") new_state_dict[f"{prefix_to}.resnets.0.conv2.bias"] = old_state_dict.pop(f"{prefix_from}.block.0.conv2.bias") if f"{prefix_from}.block.0.nin_shortcut.weight" in old_state_dict: new_state_dict[f"{prefix_to}.resnets.0.conv_shortcut.weight"] = old_state_dict.pop(f"{prefix_from}.block.0.nin_shortcut.weight") new_state_dict[f"{prefix_to}.resnets.0.conv_shortcut.bias"] = old_state_dict.pop(f"{prefix_from}.block.0.nin_shortcut.bias") new_state_dict[f"{prefix_to}.resnets.1.norm1.weight"] = old_state_dict.pop(f"{prefix_from}.block.1.norm1.weight") new_state_dict[f"{prefix_to}.resnets.1.norm1.bias"] = old_state_dict.pop(f"{prefix_from}.block.1.norm1.bias") new_state_dict[f"{prefix_to}.resnets.1.conv1.weight"] = old_state_dict.pop(f"{prefix_from}.block.1.conv1.weight") new_state_dict[f"{prefix_to}.resnets.1.conv1.bias"] = old_state_dict.pop(f"{prefix_from}.block.1.conv1.bias") new_state_dict[f"{prefix_to}.resnets.1.norm2.weight"] = old_state_dict.pop(f"{prefix_from}.block.1.norm2.weight") new_state_dict[f"{prefix_to}.resnets.1.norm2.bias"] = old_state_dict.pop(f"{prefix_from}.block.1.norm2.bias") new_state_dict[f"{prefix_to}.resnets.1.conv2.weight"] = old_state_dict.pop(f"{prefix_from}.block.1.conv2.weight") new_state_dict[f"{prefix_to}.resnets.1.conv2.bias"] = old_state_dict.pop(f"{prefix_from}.block.1.conv2.bias") if f"{prefix_from}.downsample.conv.weight" in old_state_dict: new_state_dict[f"{prefix_to}.downsamplers.0.conv.weight"] = old_state_dict.pop(f"{prefix_from}.downsample.conv.weight") new_state_dict[f"{prefix_to}.downsamplers.0.conv.bias"] = old_state_dict.pop(f"{prefix_from}.downsample.conv.bias") if f"{prefix_from}.upsample.conv.weight" in old_state_dict: new_state_dict[f"{prefix_to}.upsamplers.0.conv.weight"] = old_state_dict.pop(f"{prefix_from}.upsample.conv.weight") new_state_dict[f"{prefix_to}.upsamplers.0.conv.bias"] = old_state_dict.pop(f"{prefix_from}.upsample.conv.bias") if f"{prefix_from}.block.2.norm1.weight" in old_state_dict: new_state_dict[f"{prefix_to}.resnets.2.norm1.weight"] = old_state_dict.pop(f"{prefix_from}.block.2.norm1.weight") new_state_dict[f"{prefix_to}.resnets.2.norm1.bias"] = old_state_dict.pop(f"{prefix_from}.block.2.norm1.bias") new_state_dict[f"{prefix_to}.resnets.2.conv1.weight"] = old_state_dict.pop(f"{prefix_from}.block.2.conv1.weight") new_state_dict[f"{prefix_to}.resnets.2.conv1.bias"] = old_state_dict.pop(f"{prefix_from}.block.2.conv1.bias") new_state_dict[f"{prefix_to}.resnets.2.norm2.weight"] = old_state_dict.pop(f"{prefix_from}.block.2.norm2.weight") new_state_dict[f"{prefix_to}.resnets.2.norm2.bias"] = old_state_dict.pop(f"{prefix_from}.block.2.norm2.bias") new_state_dict[f"{prefix_to}.resnets.2.conv2.weight"] = old_state_dict.pop(f"{prefix_from}.block.2.conv2.weight") new_state_dict[f"{prefix_to}.resnets.2.conv2.bias"] = old_state_dict.pop(f"{prefix_from}.block.2.conv2.bias") # fmt: on if __name__ == "__main__": main()
# ---- source: diffusers/scripts/convert_amused.py ----
import argparse import huggingface_hub import k_diffusion as K import torch from diffusers import UNet2DConditionModel UPSCALER_REPO = "pcuenq/k-upscaler" def resnet_to_diffusers_checkpoint(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix): rv = { # norm1 f"{diffusers_resnet_prefix}.norm1.linear.weight": checkpoint[f"{resnet_prefix}.main.0.mapper.weight"], f"{diffusers_resnet_prefix}.norm1.linear.bias": checkpoint[f"{resnet_prefix}.main.0.mapper.bias"], # conv1 f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.main.2.weight"], f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.main.2.bias"], # norm2 f"{diffusers_resnet_prefix}.norm2.linear.weight": checkpoint[f"{resnet_prefix}.main.4.mapper.weight"], f"{diffusers_resnet_prefix}.norm2.linear.bias": checkpoint[f"{resnet_prefix}.main.4.mapper.bias"], # conv2 f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.main.6.weight"], f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.main.6.bias"], } if resnet.conv_shortcut is not None: rv.update( { f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.skip.weight"], } ) return rv def self_attn_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix): weight_q, weight_k, weight_v = checkpoint[f"{attention_prefix}.qkv_proj.weight"].chunk(3, dim=0) bias_q, bias_k, bias_v = checkpoint[f"{attention_prefix}.qkv_proj.bias"].chunk(3, dim=0) rv = { # norm f"{diffusers_attention_prefix}.norm1.linear.weight": checkpoint[f"{attention_prefix}.norm_in.mapper.weight"], f"{diffusers_attention_prefix}.norm1.linear.bias": checkpoint[f"{attention_prefix}.norm_in.mapper.bias"], # to_q f"{diffusers_attention_prefix}.attn1.to_q.weight": weight_q.squeeze(-1).squeeze(-1), f"{diffusers_attention_prefix}.attn1.to_q.bias": bias_q, # to_k f"{diffusers_attention_prefix}.attn1.to_k.weight": weight_k.squeeze(-1).squeeze(-1), f"{diffusers_attention_prefix}.attn1.to_k.bias": bias_k, # to_v f"{diffusers_attention_prefix}.attn1.to_v.weight": weight_v.squeeze(-1).squeeze(-1), f"{diffusers_attention_prefix}.attn1.to_v.bias": bias_v, # to_out f"{diffusers_attention_prefix}.attn1.to_out.0.weight": checkpoint[f"{attention_prefix}.out_proj.weight"] .squeeze(-1) .squeeze(-1), f"{diffusers_attention_prefix}.attn1.to_out.0.bias": checkpoint[f"{attention_prefix}.out_proj.bias"], } return rv def cross_attn_to_diffusers_checkpoint( checkpoint, *, diffusers_attention_prefix, diffusers_attention_index, attention_prefix ): weight_k, weight_v = checkpoint[f"{attention_prefix}.kv_proj.weight"].chunk(2, dim=0) bias_k, bias_v = checkpoint[f"{attention_prefix}.kv_proj.bias"].chunk(2, dim=0) rv = { # norm2 (ada groupnorm) f"{diffusers_attention_prefix}.norm{diffusers_attention_index}.linear.weight": checkpoint[ f"{attention_prefix}.norm_dec.mapper.weight" ], f"{diffusers_attention_prefix}.norm{diffusers_attention_index}.linear.bias": checkpoint[ f"{attention_prefix}.norm_dec.mapper.bias" ], # layernorm on encoder_hidden_state f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.norm_cross.weight": checkpoint[ f"{attention_prefix}.norm_enc.weight" ], f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.norm_cross.bias": checkpoint[ f"{attention_prefix}.norm_enc.bias" ], # to_q f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_q.weight": checkpoint[ f"{attention_prefix}.q_proj.weight" ] .squeeze(-1) .squeeze(-1), 
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_q.bias": checkpoint[ f"{attention_prefix}.q_proj.bias" ], # to_k f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_k.weight": weight_k.squeeze(-1).squeeze(-1), f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_k.bias": bias_k, # to_v f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_v.weight": weight_v.squeeze(-1).squeeze(-1), f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_v.bias": bias_v, # to_out f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_out.0.weight": checkpoint[ f"{attention_prefix}.out_proj.weight" ] .squeeze(-1) .squeeze(-1), f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_out.0.bias": checkpoint[ f"{attention_prefix}.out_proj.bias" ], } return rv def block_to_diffusers_checkpoint(block, checkpoint, block_idx, block_type): block_prefix = "inner_model.u_net.u_blocks" if block_type == "up" else "inner_model.u_net.d_blocks" block_prefix = f"{block_prefix}.{block_idx}" diffusers_checkpoint = {} if not hasattr(block, "attentions"): n = 1 # resnet only elif not block.attentions[0].add_self_attention: n = 2 # resnet -> cross-attention else: n = 3 # resnet -> self-attention -> cross-attention) for resnet_idx, resnet in enumerate(block.resnets): # diffusers_resnet_prefix = f"{diffusers_up_block_prefix}.resnets.{resnet_idx}" diffusers_resnet_prefix = f"{block_type}_blocks.{block_idx}.resnets.{resnet_idx}" idx = n * resnet_idx if block_type == "up" else n * resnet_idx + 1 resnet_prefix = f"{block_prefix}.{idx}" if block_type == "up" else f"{block_prefix}.{idx}" diffusers_checkpoint.update( resnet_to_diffusers_checkpoint( resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix ) ) if hasattr(block, "attentions"): for attention_idx, attention in enumerate(block.attentions): diffusers_attention_prefix = f"{block_type}_blocks.{block_idx}.attentions.{attention_idx}" idx = n * attention_idx + 1 if block_type == "up" else n * attention_idx + 2 self_attention_prefix = f"{block_prefix}.{idx}" cross_attention_prefix = f"{block_prefix}.{idx }" cross_attention_index = 1 if not attention.add_self_attention else 2 idx = ( n * attention_idx + cross_attention_index if block_type == "up" else n * attention_idx + cross_attention_index + 1 ) cross_attention_prefix = f"{block_prefix}.{idx }" diffusers_checkpoint.update( cross_attn_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, diffusers_attention_index=2, attention_prefix=cross_attention_prefix, ) ) if attention.add_self_attention is True: diffusers_checkpoint.update( self_attn_to_diffusers_checkpoint( checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=self_attention_prefix, ) ) return diffusers_checkpoint def unet_to_diffusers_checkpoint(model, checkpoint): diffusers_checkpoint = {} # pre-processing diffusers_checkpoint.update( { "conv_in.weight": checkpoint["inner_model.proj_in.weight"], "conv_in.bias": checkpoint["inner_model.proj_in.bias"], } ) # timestep and class embedding diffusers_checkpoint.update( { "time_proj.weight": checkpoint["inner_model.timestep_embed.weight"].squeeze(-1), "time_embedding.linear_1.weight": checkpoint["inner_model.mapping.0.weight"], "time_embedding.linear_1.bias": checkpoint["inner_model.mapping.0.bias"], "time_embedding.linear_2.weight": checkpoint["inner_model.mapping.2.weight"], "time_embedding.linear_2.bias": 
checkpoint["inner_model.mapping.2.bias"], "time_embedding.cond_proj.weight": checkpoint["inner_model.mapping_cond.weight"], } ) # down_blocks for down_block_idx, down_block in enumerate(model.down_blocks): diffusers_checkpoint.update(block_to_diffusers_checkpoint(down_block, checkpoint, down_block_idx, "down")) # up_blocks for up_block_idx, up_block in enumerate(model.up_blocks): diffusers_checkpoint.update(block_to_diffusers_checkpoint(up_block, checkpoint, up_block_idx, "up")) # post-processing diffusers_checkpoint.update( { "conv_out.weight": checkpoint["inner_model.proj_out.weight"], "conv_out.bias": checkpoint["inner_model.proj_out.bias"], } ) return diffusers_checkpoint def unet_model_from_original_config(original_config): in_channels = original_config["input_channels"] + original_config["unet_cond_dim"] out_channels = original_config["input_channels"] + (1 if original_config["has_variance"] else 0) block_out_channels = original_config["channels"] assert ( len(set(original_config["depths"])) == 1 ), "UNet2DConditionModel currently do not support blocks with different number of layers" layers_per_block = original_config["depths"][0] class_labels_dim = original_config["mapping_cond_dim"] cross_attention_dim = original_config["cross_cond_dim"] attn1_types = [] attn2_types = [] for s, c in zip(original_config["self_attn_depths"], original_config["cross_attn_depths"]): if s: a1 = "self" a2 = "cross" if c else None elif c: a1 = "cross" a2 = None else: a1 = None a2 = None attn1_types.append(a1) attn2_types.append(a2) unet = UNet2DConditionModel( in_channels=in_channels, out_channels=out_channels, down_block_types=("KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D"), mid_block_type=None, up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn="gelu", norm_num_groups=None, cross_attention_dim=cross_attention_dim, attention_head_dim=64, time_cond_proj_dim=class_labels_dim, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", conv_in_kernel=1, conv_out_kernel=1, ) return unet def main(args): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") orig_config_path = huggingface_hub.hf_hub_download(UPSCALER_REPO, "config_laion_text_cond_latent_upscaler_2.json") orig_weights_path = huggingface_hub.hf_hub_download( UPSCALER_REPO, "laion_text_cond_latent_upscaler_2_1_00470000_slim.pth" ) print(f"loading original model configuration from {orig_config_path}") print(f"loading original model checkpoint from {orig_weights_path}") print("converting to diffusers unet") orig_config = K.config.load_config(open(orig_config_path))["model"] model = unet_model_from_original_config(orig_config) orig_checkpoint = torch.load(orig_weights_path, map_location=device)["model_ema"] converted_checkpoint = unet_to_diffusers_checkpoint(model, orig_checkpoint) model.load_state_dict(converted_checkpoint, strict=True) model.save_pretrained(args.dump_path) print(f"saving converted unet model in {args.dump_path}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() main(args)
# ---- source: diffusers/scripts/convert_k_upscaler_to_diffusers.py ----
import argparse import os import torch from transformers import T5EncoderModel, T5Tokenizer from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, PixArtAlphaPipeline, Transformer2DModel ckpt_id = "PixArt-alpha/PixArt-alpha" # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/scripts/inference.py#L125 interpolation_scale = {512: 1, 1024: 2} def main(args): all_state_dict = torch.load(args.orig_ckpt_path) state_dict = all_state_dict.pop("state_dict") converted_state_dict = {} # Patch embeddings. converted_state_dict["pos_embed.proj.weight"] = state_dict.pop("x_embedder.proj.weight") converted_state_dict["pos_embed.proj.bias"] = state_dict.pop("x_embedder.proj.bias") # Caption projection. converted_state_dict["caption_projection.y_embedding"] = state_dict.pop("y_embedder.y_embedding") converted_state_dict["caption_projection.linear_1.weight"] = state_dict.pop("y_embedder.y_proj.fc1.weight") converted_state_dict["caption_projection.linear_1.bias"] = state_dict.pop("y_embedder.y_proj.fc1.bias") converted_state_dict["caption_projection.linear_2.weight"] = state_dict.pop("y_embedder.y_proj.fc2.weight") converted_state_dict["caption_projection.linear_2.bias"] = state_dict.pop("y_embedder.y_proj.fc2.bias") # AdaLN-single LN converted_state_dict["adaln_single.emb.timestep_embedder.linear_1.weight"] = state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["adaln_single.emb.timestep_embedder.linear_1.bias"] = state_dict.pop("t_embedder.mlp.0.bias") converted_state_dict["adaln_single.emb.timestep_embedder.linear_2.weight"] = state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["adaln_single.emb.timestep_embedder.linear_2.bias"] = state_dict.pop("t_embedder.mlp.2.bias") if args.image_size == 1024: # Resolution. converted_state_dict["adaln_single.emb.resolution_embedder.linear_1.weight"] = state_dict.pop( "csize_embedder.mlp.0.weight" ) converted_state_dict["adaln_single.emb.resolution_embedder.linear_1.bias"] = state_dict.pop( "csize_embedder.mlp.0.bias" ) converted_state_dict["adaln_single.emb.resolution_embedder.linear_2.weight"] = state_dict.pop( "csize_embedder.mlp.2.weight" ) converted_state_dict["adaln_single.emb.resolution_embedder.linear_2.bias"] = state_dict.pop( "csize_embedder.mlp.2.bias" ) # Aspect ratio. converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_1.weight"] = state_dict.pop( "ar_embedder.mlp.0.weight" ) converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_1.bias"] = state_dict.pop( "ar_embedder.mlp.0.bias" ) converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_2.weight"] = state_dict.pop( "ar_embedder.mlp.2.weight" ) converted_state_dict["adaln_single.emb.aspect_ratio_embedder.linear_2.bias"] = state_dict.pop( "ar_embedder.mlp.2.bias" ) # Shared norm. converted_state_dict["adaln_single.linear.weight"] = state_dict.pop("t_block.1.weight") converted_state_dict["adaln_single.linear.bias"] = state_dict.pop("t_block.1.bias") for depth in range(28): # Transformer blocks. converted_state_dict[f"transformer_blocks.{depth}.scale_shift_table"] = state_dict.pop( f"blocks.{depth}.scale_shift_table" ) # Attention is all you need 🤘 # Self attention. 
q, k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.attn.qkv.weight"), 3, dim=0) q_bias, k_bias, v_bias = torch.chunk(state_dict.pop(f"blocks.{depth}.attn.qkv.bias"), 3, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.bias"] = v_bias # Projection. converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.attn.proj.bias" ) # Feed-forward. converted_state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.weight"] = state_dict.pop( f"blocks.{depth}.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.bias"] = state_dict.pop( f"blocks.{depth}.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.net.2.weight"] = state_dict.pop( f"blocks.{depth}.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.net.2.bias"] = state_dict.pop( f"blocks.{depth}.mlp.fc2.bias" ) # Cross-attention. q = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.weight") q_bias = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.bias") k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.weight"), 2, dim=0) k_bias, v_bias = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.bias"), 2, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.bias"] = v_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.bias" ) # Final block. 
converted_state_dict["proj_out.weight"] = state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = state_dict.pop("final_layer.linear.bias") converted_state_dict["scale_shift_table"] = state_dict.pop("final_layer.scale_shift_table") # DiT XL/2 transformer = Transformer2DModel( sample_size=args.image_size // 8, num_layers=28, attention_head_dim=72, in_channels=4, out_channels=8, patch_size=2, attention_bias=True, num_attention_heads=16, cross_attention_dim=1152, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, caption_channels=4096, ) transformer.load_state_dict(converted_state_dict, strict=True) assert transformer.pos_embed.pos_embed is not None state_dict.pop("pos_embed") assert len(state_dict) == 0, f"State dict is not empty, {state_dict.keys()}" num_model_params = sum(p.numel() for p in transformer.parameters()) print(f"Total number of transformer parameters: {num_model_params}") if args.only_transformer: transformer.save_pretrained(os.path.join(args.dump_path, "transformer")) else: scheduler = DPMSolverMultistepScheduler() vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="sd-vae-ft-ema") tokenizer = T5Tokenizer.from_pretrained(ckpt_id, subfolder="t5-v1_1-xxl") text_encoder = T5EncoderModel.from_pretrained(ckpt_id, subfolder="t5-v1_1-xxl") pipeline = PixArtAlphaPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler ) pipeline.save_pretrained(args.dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--orig_ckpt_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--image_size", default=1024, type=int, choices=[512, 1024], required=False, help="Image size of pretrained model, either 512 or 1024.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--only_transformer", default=True, type=bool, required=True) args = parser.parse_args() main(args)
# ---- source: diffusers/scripts/convert_pixart_alpha_to_diffusers.py ----
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py To create the package for PyPI. 1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the documentation. If releasing on a special branch, copy the updated README.md on the main branch for the commit you will make for the post-release and run `make fix-copies` on the main branch as well. 2. Run Tests for Amazon Sagemaker. The documentation is located in `./tests/sagemaker/README.md`, otherwise @philschmid. 3. Unpin specific versions from setup.py that use a git install. 4. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the message: "Release: <RELEASE>" and push. 5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs). 6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for PyPI'" Push the tag to git: git push --tags origin v<RELEASE>-release 7. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory (This will build a wheel for the Python version you use to build it). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. Long story cut short, you need to run both before you can upload the distribution to the test PyPI and the actual PyPI servers: python setup.py bdist_wheel && python setup.py sdist 8. Check that everything looks correct by uploading the package to the PyPI test server: twine upload dist/* -r pypitest (pypi suggests using twine as other methods upload files via plaintext.) You may have to specify the repository url, use the following command then: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi diffusers If you are testing from a Colab Notebook, for instance, then do: pip install diffusers && pip uninstall diffusers pip install -i https://testpypi.python.org/pypi diffusers Check you can run the following commands: python -c "from diffusers import __version__; print(__version__)" python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()" python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')" python -c "from diffusers import *" 9. Upload the final version to the actual PyPI: twine upload dist/* -r pypi 10. Prepare the release notes and publish them on GitHub once everything is looking hunky-dory. 11. 
Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release, you need to go back to main before executing this. """ import os import re import sys from distutils.core import Command from setuptools import find_packages, setup # IMPORTANT: # 1. all dependencies should be listed here with their version requirements if any # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py _deps = [ "Pillow", # keep the PIL.Image.Resampling deprecation away "accelerate>=0.11.0", "compel==0.1.8", "datasets", "filelock", "flax>=0.4.1", "hf-doc-builder>=0.3.0", "huggingface-hub>=0.20.2", "requests-mock==1.10.0", "importlib_metadata", "invisible-watermark>=0.2.0", "isort>=5.5.4", "jax>=0.4.1", "jaxlib>=0.4.1", "Jinja2", "k-diffusion>=0.0.12", "torchsde", "note_seq", "librosa", "numpy", "parameterized", "peft>=0.6.0", "protobuf>=3.20.3,<4", "pytest", "pytest-timeout", "pytest-xdist", "python>=3.8.0", "ruff==0.1.5", "safetensors>=0.3.1", "sentencepiece>=0.1.91,!=0.1.92", "GitPython<3.1.19", "scipy", "onnx", "regex!=2019.12.17", "requests", "tensorboard", "torch>=1.4,<2.2.0", "torchvision<0.17", "transformers>=4.25.1", "urllib3<=2.0.0", ] # this is a lookup table with items like: # # tokenizers: "huggingface-hub==0.8.0" # packaging: "packaging" # # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)} # since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: # # python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets # # Just pass the desired package names to that script as it's shown with 2 packages above. # # If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above # # You can then feed this for example to `pip`: # # pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets) # def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] class DepsTableUpdateCommand(Command): """ A custom distutils command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ( "dep-table-update", None, "updates src/diffusers/dependency_versions_table.py", ), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. 
run `make deps_table_update`", "deps = {", entries, "}", "", ] target = "src/diffusers/dependency_versions_table.py" print(f"updating {target}") with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content)) extras = {} extras["quality"] = deps_list("urllib3", "isort", "ruff", "hf-doc-builder") extras["docs"] = deps_list("hf-doc-builder") extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2", "peft") extras["test"] = deps_list( "compel", "GitPython", "datasets", "Jinja2", "invisible-watermark", "k-diffusion", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock", "safetensors", "sentencepiece", "scipy", "torchvision", "transformers", ) extras["torch"] = deps_list("torch", "accelerate") if os.name == "nt": # windows extras["flax"] = [] # jax is not supported on windows else: extras["flax"] = deps_list("jax", "jaxlib", "flax") extras["dev"] = ( extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"] ) install_requires = [ deps["importlib_metadata"], deps["filelock"], deps["huggingface-hub"], deps["numpy"], deps["regex"], deps["requests"], deps["safetensors"], deps["Pillow"], ] version_range_max = max(sys.version_info[1], 10) + 1 setup( name="diffusers", version="0.26.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) description="State-of-the-art diffusion in PyTorch and JAX.", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning diffusion jax pytorch stable diffusion audioldm", license="Apache 2.0 License", author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)", author_email="[email protected]", url="https://github.com/huggingface/diffusers", package_dir={"": "src"}, packages=find_packages("src"), package_data={"diffusers": ["py.typed"]}, include_package_data=True, python_requires=">=3.8.0", install_requires=list(install_requires), extras_require=extras, entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]}, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Programming Language :: Python :: 3", ] + [f"Programming Language :: Python :: 3.{i}" for i in range(8, version_range_max)], cmdclass={"deps_table_update": DepsTableUpdateCommand}, )
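# Minimal sketch of the dependency-table trick used above (illustration only, not
# part of setup.py): the regex splits each requirement string into (full spec,
# bare package name), so pinned versions live in one place and can be looked up by
# name. The toy list below stands in for the real `_deps`.
import re

_toy_deps = ["accelerate>=0.11.0", "Pillow", "protobuf>=3.20.3,<4"]
toy_table = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _toy_deps)}
assert toy_table["accelerate"] == "accelerate>=0.11.0"
assert toy_table["protobuf"] == "protobuf>=3.20.3,<4"
print(toy_table)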
diffusers/setup.py/0
{ "file_path": "diffusers/setup.py", "repo_id": "diffusers", "token_count": 3776 }
115
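The extras defined in the file above are just lists of pinned requirement strings resolved through the `deps` table via `deps_list`. A small self-contained illustration (the two pins are excerpts from `_deps`):

```py
# Excerpt of the lookup table built from `_deps` in setup.py.
deps = {"torch": "torch>=1.4,<2.2.0", "accelerate": "accelerate>=0.11.0"}

def deps_list(*pkgs):
    # Map bare package names to their pinned requirement strings.
    return [deps[pkg] for pkg in pkgs]

extras = {"torch": deps_list("torch", "accelerate")}
print(extras["torch"])  # ['torch>=1.4,<2.2.0', 'accelerate>=0.11.0']
```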
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub.utils import validate_hf_hub_args from .single_file_utils import ( create_diffusers_controlnet_model_from_ldm, fetch_ldm_config_and_checkpoint, ) class FromOriginalControlNetMixin: """ Load pretrained ControlNet weights saved in the `.ckpt` or `.safetensors` format into a [`ControlNetModel`]. """ @classmethod @validate_hf_hub_args def from_single_file(cls, pretrained_model_link_or_path, **kwargs): r""" Instantiate a [`ControlNetModel`] from pretrained ControlNet weights saved in the original `.ckpt` or `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A link to the `.ckpt` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub. - A path to a *file* containing all pipeline weights. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to True, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. image_size (`int`, *optional*, defaults to 512): The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable Diffusion v2 base model. 
Use 768 for Stable Diffusion v2. upcast_attention (`bool`, *optional*, defaults to `None`): Whether the attention computation should always be upcast. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite loadable and saveable variables (for example the pipeline components of the specific pipeline class). The overwritten components are directly passed to the pipeline's `__init__` method. See example below for more information. Examples: ```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" # can also be a local path controlnet = ControlNetModel.from_single_file(url) url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors" # can also be a local path pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet) ``` """ original_config_file = kwargs.pop("original_config_file", None) resume_download = kwargs.pop("resume_download", False) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) cache_dir = kwargs.pop("cache_dir", None) local_files_only = kwargs.pop("local_files_only", None) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) use_safetensors = kwargs.pop("use_safetensors", True) class_name = cls.__name__ original_config, checkpoint = fetch_ldm_config_and_checkpoint( pretrained_model_link_or_path=pretrained_model_link_or_path, class_name=class_name, original_config_file=original_config_file, resume_download=resume_download, force_download=force_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, use_safetensors=use_safetensors, cache_dir=cache_dir, ) upcast_attention = kwargs.pop("upcast_attention", False) image_size = kwargs.pop("image_size", None) component = create_diffusers_controlnet_model_from_ldm( class_name, original_config, checkpoint, upcast_attention=upcast_attention, image_size=image_size ) controlnet = component["controlnet"] if torch_dtype is not None: controlnet = controlnet.to(torch_dtype) return controlnet
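For reference, a slightly fuller usage sketch of the loader above; the checkpoint URLs are the ones from the docstring, and the `torch_dtype` argument is optional:

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Load ControlNet weights from a single original-format (.pth) checkpoint.
url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
controlnet = ControlNetModel.from_single_file(url, torch_dtype=torch.float16)

# Plug it into a pipeline that is itself loaded from a single file.
pipe = StableDiffusionControlNetPipeline.from_single_file(
    "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors",
    controlnet=controlnet,
)
```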
diffusers/src/diffusers/loaders/controlnet.py/0
{ "file_path": "diffusers/src/diffusers/loaders/controlnet.py", "repo_id": "diffusers", "token_count": 2757 }
116
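The single-file loader in the previous record also honors the keyword arguments it pops (`local_files_only`, `image_size`, `upcast_attention`, ...). A hedged sketch with a hypothetical local checkpoint path:

```py
import torch
from diffusers import ControlNetModel

# Hypothetical local checkpoint path; with local_files_only=True no Hub
# download is attempted, and image_size matches an SD v1-style checkpoint.
controlnet = ControlNetModel.from_single_file(
    "./control_v11p_sd15_canny.safetensors",
    torch_dtype=torch.float16,
    local_files_only=True,
    image_size=512,
    upcast_attention=False,
)
```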
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from importlib import import_module from typing import Callable, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..utils import USE_PEFT_BACKEND, deprecate, logging from ..utils.import_utils import is_xformers_available from ..utils.torch_utils import maybe_allow_in_graph from .lora import LoRACompatibleLinear, LoRALinearLayer logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_xformers_available(): import xformers import xformers.ops else: xformers = None @maybe_allow_in_graph class Attention(nn.Module): r""" A cross attention layer. Parameters: query_dim (`int`): The number of channels in the query. cross_attention_dim (`int`, *optional*): The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention. dim_head (`int`, *optional*, defaults to 64): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. bias (`bool`, *optional*, defaults to False): Set to `True` for the query, key, and value linear layers to contain a bias parameter. upcast_attention (`bool`, *optional*, defaults to False): Set to `True` to upcast the attention computation to `float32`. upcast_softmax (`bool`, *optional*, defaults to False): Set to `True` to upcast the softmax computation to `float32`. cross_attention_norm (`str`, *optional*, defaults to `None`): The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. cross_attention_norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the group norm in the cross attention. added_kv_proj_dim (`int`, *optional*, defaults to `None`): The number of channels to use for the added key and value projections. If `None`, no projection is used. norm_num_groups (`int`, *optional*, defaults to `None`): The number of groups to use for the group norm in the attention. spatial_norm_dim (`int`, *optional*, defaults to `None`): The number of channels to use for the spatial normalization. out_bias (`bool`, *optional*, defaults to `True`): Set to `True` to use a bias in the output linear layer. scale_qk (`bool`, *optional*, defaults to `True`): Set to `True` to scale the query and key by `1 / sqrt(dim_head)`. only_cross_attention (`bool`, *optional*, defaults to `False`): Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if `added_kv_proj_dim` is not `None`. eps (`float`, *optional*, defaults to 1e-5): An additional value added to the denominator in group normalization that is used for numerical stability. rescale_output_factor (`float`, *optional*, defaults to 1.0): A factor to rescale the output by dividing it with this value. 
residual_connection (`bool`, *optional*, defaults to `False`): Set to `True` to add the residual connection to the output. _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`): Set to `True` if the attention block is loaded from a deprecated state dict. processor (`AttnProcessor`, *optional*, defaults to `None`): The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and `AttnProcessor` otherwise. """ def __init__( self, query_dim: int, cross_attention_dim: Optional[int] = None, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, upcast_attention: bool = False, upcast_softmax: bool = False, cross_attention_norm: Optional[str] = None, cross_attention_norm_num_groups: int = 32, added_kv_proj_dim: Optional[int] = None, norm_num_groups: Optional[int] = None, spatial_norm_dim: Optional[int] = None, out_bias: bool = True, scale_qk: bool = True, only_cross_attention: bool = False, eps: float = 1e-5, rescale_output_factor: float = 1.0, residual_connection: bool = False, _from_deprecated_attn_block: bool = False, processor: Optional["AttnProcessor"] = None, out_dim: int = None, ): super().__init__() self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim self.upcast_attention = upcast_attention self.upcast_softmax = upcast_softmax self.rescale_output_factor = rescale_output_factor self.residual_connection = residual_connection self.dropout = dropout self.fused_projections = False self.out_dim = out_dim if out_dim is not None else query_dim # we make use of this private variable to know whether this class is loaded # with an deprecated state dict so that we can convert it on the fly self._from_deprecated_attn_block = _from_deprecated_attn_block self.scale_qk = scale_qk self.scale = dim_head**-0.5 if self.scale_qk else 1.0 self.heads = out_dim // dim_head if out_dim is not None else heads # for slice_size > 0 the attention score computation # is split across the batch axis to save memory # You can set slice_size with `set_attention_slice` self.sliceable_head_dim = heads self.added_kv_proj_dim = added_kv_proj_dim self.only_cross_attention = only_cross_attention if self.added_kv_proj_dim is None and self.only_cross_attention: raise ValueError( "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`." ) if norm_num_groups is not None: self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True) else: self.group_norm = None if spatial_norm_dim is not None: self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim) else: self.spatial_norm = None if cross_attention_norm is None: self.norm_cross = None elif cross_attention_norm == "layer_norm": self.norm_cross = nn.LayerNorm(self.cross_attention_dim) elif cross_attention_norm == "group_norm": if self.added_kv_proj_dim is not None: # The given `encoder_hidden_states` are initially of shape # (batch_size, seq_len, added_kv_proj_dim) before being projected # to (batch_size, seq_len, cross_attention_dim). The norm is applied # before the projection, so we need to use `added_kv_proj_dim` as # the number of channels for the group norm. 
norm_cross_num_channels = added_kv_proj_dim else: norm_cross_num_channels = self.cross_attention_dim self.norm_cross = nn.GroupNorm( num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True ) else: raise ValueError( f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'" ) if USE_PEFT_BACKEND: linear_cls = nn.Linear else: linear_cls = LoRACompatibleLinear self.linear_cls = linear_cls self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias) if not self.only_cross_attention: # only relevant for the `AddedKVProcessor` classes self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) else: self.to_k = None self.to_v = None if self.added_kv_proj_dim is not None: self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim) self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim) self.to_out = nn.ModuleList([]) self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(nn.Dropout(dropout)) # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 if processor is None: processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_use_memory_efficient_attention_xformers( self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None ) -> None: r""" Set whether to use memory efficient attention from `xformers` or not. Args: use_memory_efficient_attention_xformers (`bool`): Whether to use memory efficient attention from `xformers` or not. attention_op (`Callable`, *optional*): The attention operation to use. Defaults to `None` which uses the default attention operation from `xformers`. """ is_lora = hasattr(self, "processor") and isinstance( self.processor, LORA_ATTENTION_PROCESSORS, ) is_custom_diffusion = hasattr(self, "processor") and isinstance( self.processor, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0), ) is_added_kv_processor = hasattr(self, "processor") and isinstance( self.processor, ( AttnAddedKVProcessor, AttnAddedKVProcessor2_0, SlicedAttnAddedKVProcessor, XFormersAttnAddedKVProcessor, LoRAAttnAddedKVProcessor, ), ) if use_memory_efficient_attention_xformers: if is_added_kv_processor and (is_lora or is_custom_diffusion): raise NotImplementedError( f"Memory efficient attention is currently not supported for LoRA or custom diffusion for attention processor type {self.processor}" ) if not is_xformers_available(): raise ModuleNotFoundError( ( "Refer to https://github.com/facebookresearch/xformers for more information on how to install" " xformers" ), name="xformers", ) elif not torch.cuda.is_available(): raise ValueError( "torch.cuda.is_available() should be True but is False. 
xformers' memory efficient attention is" " only available for GPU " ) else: try: # Make sure we can run the memory efficient attention _ = xformers.ops.memory_efficient_attention( torch.randn((1, 2, 40), device="cuda"), torch.randn((1, 2, 40), device="cuda"), torch.randn((1, 2, 40), device="cuda"), ) except Exception as e: raise e if is_lora: # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0? processor = LoRAXFormersAttnProcessor( hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, rank=self.processor.rank, attention_op=attention_op, ) processor.load_state_dict(self.processor.state_dict()) processor.to(self.processor.to_q_lora.up.weight.device) elif is_custom_diffusion: processor = CustomDiffusionXFormersAttnProcessor( train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, attention_op=attention_op, ) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, "to_k_custom_diffusion"): processor.to(self.processor.to_k_custom_diffusion.weight.device) elif is_added_kv_processor: # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP # which uses this type of cross attention ONLY because the attention mask of format # [0, ..., -10.000, ..., 0, ...,] is not supported # throw warning logger.info( "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation." ) processor = XFormersAttnAddedKVProcessor(attention_op=attention_op) else: processor = XFormersAttnProcessor(attention_op=attention_op) else: if is_lora: attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor ) processor = attn_processor_class( hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, rank=self.processor.rank, ) processor.load_state_dict(self.processor.state_dict()) processor.to(self.processor.to_q_lora.up.weight.device) elif is_custom_diffusion: attn_processor_class = ( CustomDiffusionAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else CustomDiffusionAttnProcessor ) processor = attn_processor_class( train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, ) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, "to_k_custom_diffusion"): processor.to(self.processor.to_k_custom_diffusion.weight.device) else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_attention_slice(self, slice_size: int) -> None: r""" Set the slice size for attention computation. Args: slice_size (`int`): The slice size for attention computation. 
""" if slice_size is not None and slice_size > self.sliceable_head_dim: raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") if slice_size is not None and self.added_kv_proj_dim is not None: processor = SlicedAttnAddedKVProcessor(slice_size) elif slice_size is not None: processor = SlicedAttnProcessor(slice_size) elif self.added_kv_proj_dim is not None: processor = AttnAddedKVProcessor() else: # set attention processor # We use the AttnProcessor2_0 by default when torch 2.x is used which uses # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 processor = ( AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() ) self.set_processor(processor) def set_processor(self, processor: "AttnProcessor") -> None: r""" Set the attention processor to use. Args: processor (`AttnProcessor`): The attention processor to use. """ # if current processor is in `self._modules` and if passed `processor` is not, we need to # pop `processor` from `self._modules` if ( hasattr(self, "processor") and isinstance(self.processor, torch.nn.Module) and not isinstance(processor, torch.nn.Module) ): logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}") self._modules.pop("processor") self.processor = processor def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": r""" Get the attention processor in use. Args: return_deprecated_lora (`bool`, *optional*, defaults to `False`): Set to `True` to return the deprecated LoRA attention processor. Returns: "AttentionProcessor": The attention processor in use. """ if not return_deprecated_lora: return self.processor # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible # serialization format for LoRA Attention Processors. It should be deleted once the integration # with PEFT is completed. is_lora_activated = { name: module.lora_layer is not None for name, module in self.named_modules() if hasattr(module, "lora_layer") } # 1. if no layer has a LoRA activated we can return the processor as usual if not any(is_lora_activated.values()): return self.processor # If doesn't apply LoRA do `add_k_proj` or `add_v_proj` is_lora_activated.pop("add_k_proj", None) is_lora_activated.pop("add_v_proj", None) # 2. else it is not posssible that only some layers have LoRA activated if not all(is_lora_activated.values()): raise ValueError( f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}" ) # 3. 
And we need to merge the current LoRA layers into the corresponding LoRA attention processor non_lora_processor_cls_name = self.processor.__class__.__name__ lora_processor_cls = getattr(import_module(__name__), "LoRA" + non_lora_processor_cls_name) hidden_size = self.inner_dim # now create a LoRA attention processor from the LoRA layers if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]: kwargs = { "cross_attention_dim": self.cross_attention_dim, "rank": self.to_q.lora_layer.rank, "network_alpha": self.to_q.lora_layer.network_alpha, "q_rank": self.to_q.lora_layer.rank, "q_hidden_size": self.to_q.lora_layer.out_features, "k_rank": self.to_k.lora_layer.rank, "k_hidden_size": self.to_k.lora_layer.out_features, "v_rank": self.to_v.lora_layer.rank, "v_hidden_size": self.to_v.lora_layer.out_features, "out_rank": self.to_out[0].lora_layer.rank, "out_hidden_size": self.to_out[0].lora_layer.out_features, } if hasattr(self.processor, "attention_op"): kwargs["attention_op"] = self.processor.attention_op lora_processor = lora_processor_cls(hidden_size, **kwargs) lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict()) elif lora_processor_cls == LoRAAttnAddedKVProcessor: lora_processor = lora_processor_cls( hidden_size, cross_attention_dim=self.add_k_proj.weight.shape[0], rank=self.to_q.lora_layer.rank, network_alpha=self.to_q.lora_layer.network_alpha, ) lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict()) # only save if used if self.add_k_proj.lora_layer is not None: lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict()) lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict()) else: lora_processor.add_k_proj_lora = None lora_processor.add_v_proj_lora = None else: raise ValueError(f"{lora_processor_cls} does not exist.") return lora_processor def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, **cross_attention_kwargs, ) -> torch.Tensor: r""" The forward method of the `Attention` class. Args: hidden_states (`torch.Tensor`): The hidden states of the query. encoder_hidden_states (`torch.Tensor`, *optional*): The hidden states of the encoder. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. **cross_attention_kwargs: Additional keyword arguments to pass along to the cross attention. Returns: `torch.Tensor`: The output of the attention layer. 
""" # The `Attention` class can call different attention processors / attention functions # here we simply pass along all tensors to the selected processor class # For standard processors that are defined here, `**cross_attention_kwargs` is empty return self.processor( self, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs, ) def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: r""" Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads` is the number of heads initialized while constructing the `Attention` class. Args: tensor (`torch.Tensor`): The tensor to reshape. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads batch_size, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: r""" Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is the number of heads initialized while constructing the `Attention` class. Args: tensor (`torch.Tensor`): The tensor to reshape. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is reshaped to `[batch_size * heads, seq_len, dim // heads]`. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads batch_size, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3) if out_dim == 3: tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size) return tensor def get_attention_scores( self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None ) -> torch.Tensor: r""" Compute the attention scores. Args: query (`torch.Tensor`): The query tensor. key (`torch.Tensor`): The key tensor. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. Returns: `torch.Tensor`: The attention probabilities/scores. """ dtype = query.dtype if self.upcast_attention: query = query.float() key = key.float() if attention_mask is None: baddbmm_input = torch.empty( query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device ) beta = 0 else: baddbmm_input = attention_mask beta = 1 attention_scores = torch.baddbmm( baddbmm_input, query, key.transpose(-1, -2), beta=beta, alpha=self.scale, ) del baddbmm_input if self.upcast_softmax: attention_scores = attention_scores.float() attention_probs = attention_scores.softmax(dim=-1) del attention_scores attention_probs = attention_probs.to(dtype) return attention_probs def prepare_attention_mask( self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 ) -> torch.Tensor: r""" Prepare the attention mask for the attention computation. Args: attention_mask (`torch.Tensor`): The attention mask to prepare. target_length (`int`): The target length of the attention mask. This is the length of the attention mask after padding. batch_size (`int`): The batch size, which is used to repeat the attention mask. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the attention mask. Can be either `3` or `4`. Returns: `torch.Tensor`: The prepared attention mask. 
""" head_size = self.heads if attention_mask is None: return attention_mask current_length: int = attention_mask.shape[-1] if current_length != target_length: if attention_mask.device.type == "mps": # HACK: MPS: Does not support padding by greater than dimension of input tensor. # Instead, we can manually construct the padding tensor. padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, padding], dim=2) else: # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: # we want to instead pad by (0, remaining_length), where remaining_length is: # remaining_length: int = target_length - current_length # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) if out_dim == 3: if attention_mask.shape[0] < batch_size * head_size: attention_mask = attention_mask.repeat_interleave(head_size, dim=0) elif out_dim == 4: attention_mask = attention_mask.unsqueeze(1) attention_mask = attention_mask.repeat_interleave(head_size, dim=1) return attention_mask def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: r""" Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the `Attention` class. Args: encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. Returns: `torch.Tensor`: The normalized encoder hidden states. """ assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states" if isinstance(self.norm_cross, nn.LayerNorm): encoder_hidden_states = self.norm_cross(encoder_hidden_states) elif isinstance(self.norm_cross, nn.GroupNorm): # Group norm norms along the channels dimension and expects # input to be in the shape of (N, C, *). In this case, we want # to norm along the hidden dimension, so we need to move # (batch_size, sequence_length, hidden_size) -> # (batch_size, hidden_size, sequence_length) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) encoder_hidden_states = self.norm_cross(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) else: assert False return encoder_hidden_states @torch.no_grad() def fuse_projections(self, fuse=True): is_cross_attention = self.cross_attention_dim != self.query_dim device = self.to_q.weight.data.device dtype = self.to_q.weight.data.dtype if not is_cross_attention: # fetch weight matrices. concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] # create a new single projection layer and copy over the weights. self.to_qkv = self.linear_cls(in_features, out_features, bias=False, device=device, dtype=dtype) self.to_qkv.weight.copy_(concatenated_weights) else: concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_kv = self.linear_cls(in_features, out_features, bias=False, device=device, dtype=dtype) self.to_kv.weight.copy_(concatenated_weights) self.fused_projections = fuse class AttnProcessor: r""" Default processor for performing attention-related computations. 
""" def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, ) -> torch.Tensor: residual = hidden_states args = () if USE_PEFT_BACKEND else (scale,) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states, *args) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states, *args) value = attn.to_v(encoder_hidden_states, *args) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionAttnProcessor(nn.Module): r""" Processor for implementing attention for the Custom Diffusion method. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. """ def __init__( self, train_kv: bool = True, train_q_out: bool = True, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim # `_custom_diffusion` id for easy serialization and loading. 
if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class AttnAddedKVProcessor: r""" Processor for performing attention-related computations with extra learnable key and value matrices for the text encoder. 
""" def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, scale: float = 1.0, ) -> torch.Tensor: residual = hidden_states args = () if USE_PEFT_BACKEND else (scale,) hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states, *args) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states, *args) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states, *args) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states, *args) value = attn.to_v(hidden_states, *args) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class AttnAddedKVProcessor2_0: r""" Processor for performing scaled dot-product attention (enabled by default if you're using PyTorch 2.0), with extra learnable key and value matrices for the text encoder. """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0." 
) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, scale: float = 1.0, ) -> torch.Tensor: residual = hidden_states args = () if USE_PEFT_BACKEND else (scale,) hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states, *args) query = attn.head_to_batch_dim(query, out_dim=4) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4) if not attn.only_cross_attention: key = attn.to_k(hidden_states, *args) value = attn.to_v(hidden_states, *args) key = attn.head_to_batch_dim(key, out_dim=4) value = attn.head_to_batch_dim(value, out_dim=4) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1]) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class XFormersAttnAddedKVProcessor: r""" Processor for implementing memory efficient attention using xFormers. Args: attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. 
""" def __init__(self, attention_op: Optional[Callable] = None): self.attention_op = attention_op def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class XFormersAttnProcessor: r""" Processor for implementing memory efficient attention using xFormers. Args: attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. 
""" def __init__(self, attention_op: Optional[Callable] = None): self.attention_op = attention_op def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, ) -> torch.FloatTensor: residual = hidden_states args = () if USE_PEFT_BACKEND else (scale,) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, key_tokens, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, key_tokens, batch_size) if attention_mask is not None: # expand our mask's singleton query_tokens dimension: # [batch*heads, 1, key_tokens] -> # [batch*heads, query_tokens, key_tokens] # so that it can be added as a bias onto the attention scores that xformers computes: # [batch*heads, query_tokens, key_tokens] # we do this explicitly because xformers doesn't broadcast the singleton dimension for us. _, query_tokens, _ = hidden_states.shape attention_mask = attention_mask.expand(-1, query_tokens, -1) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states, *args) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states, *args) value = attn.to_v(encoder_hidden_states, *args) query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class AttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). 
""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, ) -> torch.FloatTensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) args = () if USE_PEFT_BACKEND else (scale,) query = attn.to_q(hidden_states, *args) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states, *args) value = attn.to_v(encoder_hidden_states, *args) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class FusedAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). It uses fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is currently 🧪 experimental in nature and can change in future. </Tip> """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "FusedAttnProcessor2_0 requires at least PyTorch 2.0, to use it. Please upgrade PyTorch to > 2.0." 
) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0, ) -> torch.FloatTensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) args = () if USE_PEFT_BACKEND else (scale,) if encoder_hidden_states is None: qkv = attn.to_qkv(hidden_states, *args) split_size = qkv.shape[-1] // 3 query, key, value = torch.split(qkv, split_size, dim=-1) else: if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) query = attn.to_q(hidden_states, *args) kv = attn.to_kv(encoder_hidden_states, *args) split_size = kv.shape[-1] // 2 key, value = torch.split(kv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionXFormersAttnProcessor(nn.Module): r""" Processor for implementing memory efficient attention using xFormers for the Custom Diffusion method. Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. """ def __init__( self, train_kv: bool = True, train_q_out: bool = False, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, attention_op: Optional[Callable] = None, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.attention_op = attention_op # `_custom_diffusion` id for easy serialization and loading. if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention( query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale ) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: # linear proj hidden_states = self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CustomDiffusionAttnProcessor2_0(nn.Module): r""" Processor for implementing attention for the Custom Diffusion method using PyTorch 2.0’s memory-efficient scaled dot-product attention. 
Args: train_kv (`bool`, defaults to `True`): Whether to newly train the key and value matrices corresponding to the text features. train_q_out (`bool`, defaults to `True`): Whether to newly train query matrices corresponding to the latent image features. hidden_size (`int`, *optional*, defaults to `None`): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. out_bias (`bool`, defaults to `True`): Whether to include the bias parameter in `train_q_out`. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. """ def __init__( self, train_kv: bool = True, train_q_out: bool = True, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None, out_bias: bool = True, dropout: float = 0.0, ): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim # `_custom_diffusion` id for easy serialization and loading. if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states) else: query = attn.to_q(hidden_states) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() inner_dim = hidden_states.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if self.train_q_out: # linear proj hidden_states = 
self.to_out_custom_diffusion[0](hidden_states) # dropout hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class SlicedAttnProcessor: r""" Processor for implementing sliced attention. Args: slice_size (`int`): The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and `attention_head_dim` must be a multiple of the `slice_size`. """ def __init__(self, slice_size: int): self.slice_size = slice_size def __call__( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) batch_size_attention, query_tokens, _ = query.shape hidden_states = torch.zeros( (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype ) for i in range(batch_size_attention // self.slice_size): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class SlicedAttnAddedKVProcessor: r""" Processor for implementing sliced attention with extra learnable key and value matrices for the text encoder. Args: slice_size (`int`): The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and `attention_head_dim` must be a multiple of the `slice_size`.
""" def __init__(self, slice_size): self.slice_size = slice_size def __call__( self, attn: "Attention", hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, temb: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj batch_size_attention, query_tokens, _ = query.shape hidden_states = torch.zeros( (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype ) for i in range(batch_size_attention // self.slice_size): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class SpatialNorm(nn.Module): """ Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. Args: f_channels (`int`): The number of channels for input to group normalization layer, and output of the spatial norm layer. zq_channels (`int`): The number of channels for the quantized vector as described in the paper. 
""" def __init__( self, f_channels: int, zq_channels: int, ): super().__init__() self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True) self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) def forward(self, f: torch.FloatTensor, zq: torch.FloatTensor) -> torch.FloatTensor: f_size = f.shape[-2:] zq = F.interpolate(zq, size=f_size, mode="nearest") norm_f = self.norm_layer(f) new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) return new_f ## Deprecated class LoRAAttnProcessor(nn.Module): r""" Processor for implementing the LoRA attention mechanism. Args: hidden_size (`int`, *optional*): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*): The number of channels in the `encoder_hidden_states`. rank (`int`, defaults to 4): The dimension of the LoRA update matrices. network_alpha (`int`, *optional*): Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. kwargs (`dict`): Additional keyword arguments to pass to the `LoRALinearLayer` layers. """ def __init__( self, hidden_size: int, cross_attention_dim: Optional[int] = None, rank: int = 4, network_alpha: Optional[int] = None, **kwargs, ): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank q_rank = kwargs.pop("q_rank", None) q_hidden_size = kwargs.pop("q_hidden_size", None) q_rank = q_rank if q_rank is not None else rank q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size v_rank = kwargs.pop("v_rank", None) v_hidden_size = kwargs.pop("v_hidden_size", None) v_rank = v_rank if v_rank is not None else rank v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size out_rank = kwargs.pop("out_rank", None) out_hidden_size = kwargs.pop("out_hidden_size", None) out_rank = out_rank if out_rank is not None else rank out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = AttnProcessor() return attn.processor(attn, hidden_states, *args, **kwargs) class LoRAAttnProcessor2_0(nn.Module): r""" Processor for implementing the LoRA attention mechanism using PyTorch 2.0's memory-efficient scaled dot-product attention. Args: hidden_size (`int`): The hidden size of the attention layer. 
cross_attention_dim (`int`, *optional*): The number of channels in the `encoder_hidden_states`. rank (`int`, defaults to 4): The dimension of the LoRA update matrices. network_alpha (`int`, *optional*): Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs. kwargs (`dict`): Additional keyword arguments to pass to the `LoRALinearLayer` layers. """ def __init__( self, hidden_size: int, cross_attention_dim: Optional[int] = None, rank: int = 4, network_alpha: Optional[int] = None, **kwargs, ): super().__init__() if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank q_rank = kwargs.pop("q_rank", None) q_hidden_size = kwargs.pop("q_hidden_size", None) q_rank = q_rank if q_rank is not None else rank q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size v_rank = kwargs.pop("v_rank", None) v_hidden_size = kwargs.pop("v_hidden_size", None) v_rank = v_rank if v_rank is not None else rank v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size out_rank = kwargs.pop("out_rank", None) out_hidden_size = kwargs.pop("out_hidden_size", None) out_rank = out_rank if out_rank is not None else rank out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure to use {self_cls_name[4:]} instead by setting" " LoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = AttnProcessor2_0() return attn.processor(attn, hidden_states, *args, **kwargs) class LoRAXFormersAttnProcessor(nn.Module): r""" Processor for implementing the LoRA attention mechanism with memory-efficient attention using xFormers. Args: hidden_size (`int`, *optional*): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*): The number of channels in the `encoder_hidden_states`. rank (`int`, defaults to 4): The dimension of the LoRA update matrices. attention_op (`Callable`, *optional*, defaults to `None`): The base [operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best operator. network_alpha (`int`, *optional*): Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs. kwargs (`dict`): Additional keyword arguments to pass to the `LoRALinearLayer` layers.
""" def __init__( self, hidden_size: int, cross_attention_dim: int, rank: int = 4, attention_op: Optional[Callable] = None, network_alpha: Optional[int] = None, **kwargs, ): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank self.attention_op = attention_op q_rank = kwargs.pop("q_rank", None) q_hidden_size = kwargs.pop("q_hidden_size", None) q_rank = q_rank if q_rank is not None else rank q_hidden_size = q_hidden_size if q_hidden_size is not None else hidden_size v_rank = kwargs.pop("v_rank", None) v_hidden_size = kwargs.pop("v_hidden_size", None) v_rank = v_rank if v_rank is not None else rank v_hidden_size = v_hidden_size if v_hidden_size is not None else hidden_size out_rank = kwargs.pop("out_rank", None) out_hidden_size = kwargs.pop("out_hidden_size", None) out_rank = out_rank if out_rank is not None else rank out_hidden_size = out_hidden_size if out_hidden_size is not None else hidden_size self.to_q_lora = LoRALinearLayer(q_hidden_size, q_hidden_size, q_rank, network_alpha) self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(cross_attention_dim or v_hidden_size, v_hidden_size, v_rank, network_alpha) self.to_out_lora = LoRALinearLayer(out_hidden_size, out_hidden_size, out_rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,add_k_proj,add_v_proj,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = XFormersAttnProcessor() return attn.processor(attn, hidden_states, *args, **kwargs) class LoRAAttnAddedKVProcessor(nn.Module): r""" Processor for implementing the LoRA attention mechanism with extra learnable key and value matrices for the text encoder. Args: hidden_size (`int`, *optional*): The hidden size of the attention layer. cross_attention_dim (`int`, *optional*, defaults to `None`): The number of channels in the `encoder_hidden_states`. rank (`int`, defaults to 4): The dimension of the LoRA update matrices. network_alpha (`int`, *optional*): Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs. kwargs (`dict`): Additional keyword arguments to pass to the `LoRALinearLayer` layers. 
""" def __init__( self, hidden_size: int, cross_attention_dim: Optional[int] = None, rank: int = 4, network_alpha: Optional[int] = None, ): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.rank = rank self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) self.add_k_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.add_v_proj_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha) self.to_k_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) self.to_v_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha) def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: self_cls_name = self.__class__.__name__ deprecate( self_cls_name, "0.26.0", ( f"Make sure use {self_cls_name[4:]} instead by setting" "LoRA layers to `self.{to_q,to_k,to_v,add_k_proj,add_v_proj,to_out[0]}.lora_layer` respectively. This will be done automatically when using" " `LoraLoaderMixin.load_lora_weights`" ), ) attn.to_q.lora_layer = self.to_q_lora.to(hidden_states.device) attn.to_k.lora_layer = self.to_k_lora.to(hidden_states.device) attn.to_v.lora_layer = self.to_v_lora.to(hidden_states.device) attn.to_out[0].lora_layer = self.to_out_lora.to(hidden_states.device) attn._modules.pop("processor") attn.processor = AttnAddedKVProcessor() return attn.processor(attn, hidden_states, *args, **kwargs) class IPAdapterAttnProcessor(nn.Module): r""" Attention processor for Multiple IP-Adapater. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): The context length of the image features. scale (`float` or List[`float`], defaults to 1.0): the weight scale of image prompt. """ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of integers with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) self.to_v_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, scale=1.0, ): residual = hidden_states # separate ip_hidden_states from encoder_hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): encoder_hidden_states, ip_hidden_states = encoder_hidden_states else: deprecation_message = ( "You have passed a tensor as `encoder_hidden_states`.This is deprecated and will be removed in a future release." " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to supress this warning." 
) deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]], ) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # for ip-adapter for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip ): ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) ip_attention_probs = attn.get_attention_scores(query, ip_key, None) current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states) hidden_states = hidden_states + scale * current_ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class IPAdapterAttnProcessor2_0(torch.nn.Module): r""" Attention processor for IP-Adapter for PyTorch 2.0. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`): The context length of the image features. scale (`float` or `List[float]`, defaults to 1.0): The weight scale of the image prompt. """ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
) self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError("`scale` should be a list of floats with the same length as `num_tokens`.") self.scale = scale self.to_k_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) self.to_v_ip = nn.ModuleList( [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))] ) def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, scale=1.0, ): residual = hidden_states # separate ip_hidden_states from encoder_hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): encoder_hidden_states, ip_hidden_states = encoder_hidden_states else: deprecation_message = ( "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release." " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning." ) deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]], ) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # for ip-adapter for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip( ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip ): ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = 
ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 current_ip_hidden_states = F.scaled_dot_product_attention( query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False ) current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape( batch_size, -1, attn.heads * head_dim ) current_ip_hidden_states = current_ip_hidden_states.to(query.dtype) hidden_states = hidden_states + scale * current_ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states LORA_ATTENTION_PROCESSORS = ( LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, LoRAAttnAddedKVProcessor, ) ADDED_KV_ATTENTION_PROCESSORS = ( AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor, LoRAAttnAddedKVProcessor, ) CROSS_ATTENTION_PROCESSORS = ( AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor, SlicedAttnProcessor, LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, ) AttentionProcessor = Union[ AttnProcessor, AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, SlicedAttnProcessor, AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor, CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0, # deprecated LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, LoRAAttnAddedKVProcessor, ]
diffusers/src/diffusers/models/attention_processor.py/0
{ "file_path": "diffusers/src/diffusers/models/attention_processor.py", "repo_id": "diffusers", "token_count": 46582 }
117
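A brief, hedged usage sketch for the processors defined in the file above (not part of the original module): it shows how a processor instance is typically attached to a diffusers UNet. The checkpoint id is illustrative only, and `set_attn_processor` is the public diffusers API for swapping processors.

from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0, SlicedAttnProcessor

# Illustrative checkpoint; any SD-style UNet exposes the same API.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)

# Route every attention layer through PyTorch 2.0 scaled-dot-product attention.
unet.set_attn_processor(AttnProcessor2_0())

# Or trade speed for memory by computing attention in slices, as described
# in the SlicedAttnProcessor docstring above.
unet.set_attn_processor(SlicedAttnProcessor(slice_size=2))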
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pickle import UnpicklingError from typing import Any, Dict, Union import jax import jax.numpy as jnp import msgpack.exceptions from flax.core.frozen_dict import FrozenDict, unfreeze from flax.serialization import from_bytes, to_bytes from flax.traverse_util import flatten_dict, unflatten_dict from huggingface_hub import create_repo, hf_hub_download from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, validate_hf_hub_args, ) from requests import HTTPError from .. import __version__, is_torch_available from ..utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, PushToHubMixin, logging, ) from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax logger = logging.get_logger(__name__) class FlaxModelMixin(PushToHubMixin): r""" Base class for all Flax models. [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and saving models. - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`]. """ config_name = CONFIG_NAME _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] _flax_internal_args = ["name", "parent", "dtype"] @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. """ return cls(config, **kwargs) def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any: """ Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`. """ # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27 def conditional_cast(param): if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): param = param.astype(dtype) return param if mask is None: return jax.tree_map(conditional_cast, params) flat_params = flatten_dict(params) flat_mask, _ = jax.tree_flatten(mask) for masked, key in zip(flat_mask, flat_params.keys()): if masked: param = flat_params[key] flat_params[key] = conditional_cast(param) return unflatten_dict(flat_params) def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. 
Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # load model >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision >>> params = model.to_bf16(params) >>> # If you don't want to cast certain parameters (for example layer norm bias and scale) >>> # then pass the mask as follows >>> from flax import traverse_util >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> flat_params = traverse_util.flatten_dict(params) >>> mask = { ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) ... for path in flat_params ... } >>> mask = traverse_util.unflatten_dict(mask) >>> params = model.to_bf16(params, mask) ```""" return self._cast_floating_to(params, jnp.bfloat16, mask) def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # Download model and configuration from huggingface.co >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # By default, the model params will be in fp32, to illustrate the use of this method, >>> # we'll first cast to fp16 and back to fp32 >>> params = model.to_fp16(params) >>> # now cast back to fp32 >>> params = model.to_fp32(params) ```""" return self._cast_floating_to(params, jnp.float32, mask) def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # load model >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # By default, the model params will be in fp32, to cast these to float16 >>> params = model.to_fp16(params) >>> # If you don't want to cast certain parameters (for example layer norm bias and scale) >>> # then pass the mask as follows >>> from flax import traverse_util >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> flat_params = traverse_util.flatten_dict(params) >>> mask = { ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) ... for path in flat_params ...
} >>> mask = traverse_util.unflatten_dict(mask) >>> params = model.to_fp16(params, mask) ```""" return self._cast_floating_to(params, jnp.float16, mask) def init_weights(self, rng: jax.Array) -> Dict: raise NotImplementedError(f"init_weights method has to be implemented for {self}") @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype = jnp.float32, *model_args, **kwargs, ): r""" Instantiate a pretrained Flax model from a pretrained model configuration. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved using [`~FlaxModelMixin.save_pretrained`]. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given `dtype`. <Tip> This only specifies the dtype of the *computation* and does not influence the dtype of model parameters. If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and [`~FlaxModelMixin.to_bf16`]. </Tip> model_args (sequence of positional arguments, *optional*): All remaining positional arguments are passed to the underlying model's `__init__` method. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it is loaded) and initialize the model (for example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done). - If a configuration is not provided, `kwargs` are first passed to the configuration class initialization function [`~ConfigMixin.from_config`].
Each key of the `kwargs` that corresponds to a configuration attribute is used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute are passed to the underlying model's `__init__` function. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # Download model and configuration from huggingface.co and cache. >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/") ``` If you get the error message below, you need to finetune the weights for your downstream task: ```bash Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) from_pt = kwargs.pop("from_pt", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) user_agent = { "diffusers": __version__, "file_type": "model", "framework": "flax", } # Load config if we don't provide one if config is None: config, unused_kwargs = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, **kwargs, ) model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs) # Load model pretrained_path_with_subfolder = ( pretrained_model_name_or_path if subfolder is None else os.path.join(pretrained_model_name_or_path, subfolder) ) if os.path.isdir(pretrained_path_with_subfolder): if from_pt: if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} " ) model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)): # Load from a Flax checkpoint model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME) # Check if pytorch weights exist instead elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model" " using `from_pt=True`." ) else: raise EnvironmentError( f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_path_with_subfolder}." 
) else: try: model_file = hf_hub_download( pretrained_model_name_or_path, filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision, ) except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" f"{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheck your" " internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) if from_pt: if is_torch_available(): from .modeling_utils import load_state_dict else: raise EnvironmentError( "Can't load the model in PyTorch format because PyTorch is not installed. " "Please, install PyTorch or use native Flax weights." ) # Step 1: Get the pytorch file pytorch_model_file = load_state_dict(model_file) # Step 2: Convert the weights state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model) else: try: with open(model_file, "rb") as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: try: with open(model_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. 
") # make sure all arrays are stored as jnp.ndarray # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: # https://github.com/google/flax/issues/1261 state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state) # flatten dicts state = flatten_dict(state) params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0)) required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) shape_state = flatten_dict(unfreeze(params_shape_tree)) missing_keys = required_params - set(state.keys()) unexpected_keys = set(state.keys()) - required_params if missing_keys: logger.warning( f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. " "Make sure to call model.init_weights to initialize the missing weights." ) cls._missing_keys = missing_keys for key in state.keys(): if key in shape_state and state[key].shape != shape_state[key].shape: raise ValueError( f"Trying to load the pretrained weight for {key} failed: checkpoint has shape " f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. " ) # remove unexpected keys to not be saved again for unexpected_key in unexpected_keys: del state[unexpected_key] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) return model, unflatten_dict(state) def save_pretrained( self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict], is_main_process: bool = True, push_to_hub: bool = False, **kwargs, ): """ Save a model and its configuration file to a directory so that it can be reloaded using the [`~FlaxModelMixin.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to save a model and its configuration file to. Will be created if it doesn't exist. params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). 
kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", False) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id model_to_save = self # Attach architecture to the config # Save the config if is_main_process: model_to_save.save_config(save_directory) # save model output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) with open(output_model_file, "wb") as f: model_bytes = to_bytes(params) f.write(model_bytes) logger.info(f"Model weights saved in {output_model_file}") if push_to_hub: self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, )
diffusers/src/diffusers/models/modeling_flax_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "repo_id": "diffusers", "token_count": 11976 }
118
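A minimal, hedged sketch of the `FlaxModelMixin` load/cast/save round trip described in the file above. It mirrors the docstring examples (the checkpoint id comes from those examples); the save path is hypothetical.

from diffusers import FlaxUNet2DConditionModel

# Load pretrained weights; the params PyTree is returned separately from the model.
model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")

# Casting returns a new tree; the original `params` is left untouched.
params = model.to_bf16(params)

# Persist the bf16 weights and reload them.
model.save_pretrained("./unet-bf16", params=params)
model, params = FlaxUNet2DConditionModel.from_pretrained("./unet-bf16")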
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, Optional import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..attention import BasicTransformerBlock, TemporalBasicTransformerBlock from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..resnet import AlphaBlender @dataclass class TransformerTemporalModelOutput(BaseOutput): """ The output of [`TransformerTemporalModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. """ sample: torch.FloatTensor class TransformerTemporalModel(ModelMixin, ConfigMixin): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. attention_bias (`bool`, *optional*): Configure if the `TransformerBlock` attention should contain a bias parameter. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for supported activation functions. norm_elementwise_affine (`bool`, *optional*): Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization. double_self_attention (`bool`, *optional*): Configure if each `TransformerBlock` should contain two self-attention layers. positional_embeddings (`str`, *optional*): The type of positional embeddings to apply to the sequence input before use. num_positional_embeddings (`int`, *optional*): The maximum length of the sequence over which to apply positional embeddings.
""" @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, positional_embeddings=positional_embeddings, num_positional_embeddings=num_positional_embeddings, ) for d in range(num_layers) ] ) self.proj_out = nn.Linear(inner_dim, in_channels) def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.LongTensor] = None, timestep: Optional[torch.LongTensor] = None, class_labels: torch.LongTensor = None, num_frames: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> TransformerTemporalModelOutput: """ The [`TransformerTemporal`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): Input hidden_states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. num_frames (`int`, *optional*, defaults to 1): The number of frames to be processed per batch. This is used to reshape the hidden states. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. 
Input batch_frames, channel, height, width = hidden_states.shape batch_size = batch_frames // num_frames residual = hidden_states hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) hidden_states = self.proj_in(hidden_states) # 2. Blocks for block in self.transformer_blocks: hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output hidden_states = self.proj_out(hidden_states) hidden_states = ( hidden_states[None, None, :] .reshape(batch_size, height, width, num_frames, channel) .permute(0, 3, 4, 1, 2) .contiguous() ) hidden_states = hidden_states.reshape(batch_frames, channel, height, width) output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output) class TransformerSpatioTemporalModel(nn.Module): """ A Transformer model for video-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). out_channels (`int`, *optional*): The number of channels in the output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. """ def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: int = 320, out_channels: Optional[int] = None, num_layers: int = 1, cross_attention_dim: Optional[int] = None, ): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.inner_dim = inner_dim # 2. Define input layers self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6) self.proj_in = nn.Linear(in_channels, inner_dim) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim, ) for d in range(num_layers) ] ) time_mix_inner_dim = inner_dim self.temporal_transformer_blocks = nn.ModuleList( [ TemporalBasicTransformerBlock( inner_dim, time_mix_inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim, ) for _ in range(num_layers) ] ) time_embed_dim = in_channels * 4 self.time_pos_embed = TimestepEmbedding(in_channels, time_embed_dim, out_dim=in_channels) self.time_proj = Timesteps(in_channels, True, 0) self.time_mixer = AlphaBlender(alpha=0.5, merge_strategy="learned_with_images") # 4. 
Define output layers self.out_channels = in_channels if out_channels is None else out_channels # TODO: should use out_channels for continuous projections self.proj_out = nn.Linear(inner_dim, in_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, image_only_indicator: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input hidden_states. num_frames (`int`): The number of frames to be processed per batch. This is used to reshape the hidden states. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. image_only_indicator (`torch.LongTensor` of shape `(batch size, num_frames)`, *optional*): A tensor indicating whether the input contains only images. 1 indicates that the input contains only images, 0 indicates that the input contains video frames. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain tuple. Returns: [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`: If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 1. Input batch_frames, _, height, width = hidden_states.shape num_frames = image_only_indicator.shape[-1] batch_size = batch_frames // num_frames time_context = encoder_hidden_states time_context_first_timestep = time_context[None, :].reshape( batch_size, num_frames, -1, time_context.shape[-1] )[:, 0] time_context = time_context_first_timestep[None, :].broadcast_to( height * width, batch_size, 1, time_context.shape[-1] ) time_context = time_context.reshape(height * width * batch_size, 1, time_context.shape[-1]) residual = hidden_states hidden_states = self.norm(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_frames, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) num_frames_emb = torch.arange(num_frames, device=hidden_states.device) num_frames_emb = num_frames_emb.repeat(batch_size, 1) num_frames_emb = num_frames_emb.reshape(-1) t_emb = self.time_proj(num_frames_emb) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_pos_embed(t_emb) emb = emb[:, None, :] # 2. Blocks for block, temporal_block in zip(self.transformer_blocks, self.temporal_transformer_blocks): if self.training and self.gradient_checkpointing: hidden_states = torch.utils.checkpoint.checkpoint( block, hidden_states, None, encoder_hidden_states, None, use_reentrant=False, ) else: hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, ) hidden_states_mix = hidden_states hidden_states_mix = hidden_states_mix + emb hidden_states_mix = temporal_block( hidden_states_mix, num_frames=num_frames, encoder_hidden_states=time_context, ) hidden_states = self.time_mixer( x_spatial=hidden_states, x_temporal=hidden_states_mix, image_only_indicator=image_only_indicator, ) # 3. 
Output hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch_frames, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output)
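# Editor's note: a minimal, hedged usage sketch (not part of the original
# module; the config values are illustrative assumptions, not recommended
# settings). It demonstrates the shape contract of TransformerTemporalModel:
# frames arrive flattened into the batch dimension, and the residual
# connection preserves that shape. Because the module uses relative imports,
# run it with `python -m diffusers.models.transformers.transformer_temporal`.
if __name__ == "__main__":
    model = TransformerTemporalModel(
        num_attention_heads=2,
        attention_head_dim=8,
        in_channels=16,
        norm_num_groups=4,  # GroupNorm requires this to divide in_channels
    )
    batch_size, num_frames, height, width = 2, 4, 8, 8
    # frames are stacked along the batch axis: (batch_size * num_frames, C, H, W)
    hidden_states = torch.randn(batch_size * num_frames, 16, height, width)
    with torch.no_grad():
        output = model(hidden_states, num_frames=num_frames).sample
    assert output.shape == hidden_states.shape  # the residual path keeps the shape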
# (end of file: diffusers/src/diffusers/models/transformers/transformer_temporal.py)
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import logging from ..activations import get_activation from ..attention import Attention, FeedForward from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..transformers.transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from .unet_3d_condition import UNet3DConditionOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _to_tensor(inputs, device): if not torch.is_tensor(inputs): # TODO: this requires sync between CPU and GPU. So try to pass `inputs` as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = device.type == "mps" if isinstance(inputs, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 inputs = torch.tensor([inputs], dtype=dtype, device=device) elif len(inputs.shape) == 0: inputs = inputs[None].to(device) return inputs def _collapse_frames_into_batch(sample: torch.Tensor) -> torch.Tensor: batch_size, channels, num_frames, height, width = sample.shape sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) return sample class I2VGenXLTransformerTemporalEncoder(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, activation_fn: str = "geglu", upcast_attention: bool = False, ff_inner_dim: Optional[int] = None, dropout: int = 0.0, ): super().__init__() self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=1e-5) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=False, upcast_attention=upcast_attention, out_bias=True, ) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=False, inner_dim=ff_inner_dim, bias=True, ) def forward( self, hidden_states: torch.FloatTensor, ) -> torch.FloatTensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) ff_output = self.ff(hidden_states, scale=1.0) hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states class I2VGenXLUNet(ModelMixin, ConfigMixin, 
UNet2DConditionLoadersMixin): r""" I2VGenXL UNet. It is a conditional 3D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample-shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers are skipped in post-processing. cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features. num_attention_heads (`int`, *optional*, defaults to 64): The number of attention heads. """ _supports_gradient_checkpointing = False @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), up_block_types: Tuple[str, ...] = ( "UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", ), block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280), layers_per_block: int = 2, norm_num_groups: Optional[int] = 32, cross_attention_dim: int = 1024, num_attention_heads: Optional[Union[int, Tuple[int]]] = 64, ): super().__init__() self.sample_size = sample_size # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
) # input self.conv_in = nn.Conv2d(in_channels + in_channels, block_out_channels[0], kernel_size=3, padding=1) self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=num_attention_heads, in_channels=block_out_channels[0], num_layers=1, norm_num_groups=norm_num_groups, ) # image embedding self.image_latents_proj_in = nn.Sequential( nn.Conv2d(4, in_channels * 4, 3, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 4, in_channels * 4, 3, stride=1, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 4, in_channels, 3, stride=1, padding=1), ) self.image_latents_temporal_encoder = I2VGenXLTransformerTemporalEncoder( dim=in_channels, num_attention_heads=2, ff_inner_dim=in_channels * 4, attention_head_dim=in_channels, activation_fn="gelu", ) self.image_latents_context_embedding = nn.Sequential( nn.Conv2d(4, in_channels * 8, 3, padding=1), nn.SiLU(), nn.AdaptiveAvgPool2d((32, 32)), nn.Conv2d(in_channels * 8, in_channels * 16, 3, stride=2, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 16, cross_attention_dim, 3, stride=2, padding=1), ) # other embeddings -- time, context, fps, etc. time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn="silu") self.context_embedding = nn.Sequential( nn.Linear(cross_attention_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, cross_attention_dim * in_channels), ) self.fps_embedding = nn.Sequential( nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim) ) # blocks self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-05, resnet_act_fn="silu", resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], downsample_padding=1, dual_cross_attention=False, ) self.down_blocks.append(down_block) # mid self.mid_block = UNetMidBlock3DCrossAttn( in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=1e-05, resnet_act_fn="silu", output_scale_factor=1, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, 
num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-05, resnet_act_fn="silu", resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=False, resolution_idx=i, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-05) self.conv_act = get_activation("silu") self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model, indexed by their weight names. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None: """ Enables [feed forward chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers). Parameters: chunk_size (`int`, *optional*): The chunk size of the feed-forward layers. If not specified, the feed-forward layer is run individually over each tensor of dim=`dim`. 
dim (`int`, *optional*, defaults to `0`): The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch) or dim=1 (sequence length). """ if dim not in [0, 1]: raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}") # By default chunk size is 1 chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, "set_chunk_feed_forward"): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel._set_gradient_checkpointing def _set_gradient_checkpointing(self, module, value: bool = False) -> None: if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): module.gradient_checkpointing = value # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu def enable_freeu(self, s1, s2, b1, b2): r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stage blocks where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
""" for i, upsample_block in enumerate(self.up_blocks): setattr(upsample_block, "s1", s1) setattr(upsample_block, "s2", s2) setattr(upsample_block, "b1", b1) setattr(upsample_block, "b2", b2) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu def disable_freeu(self): """Disables the FreeU mechanism.""" freeu_keys = {"s1", "s2", "b1", "b2"} for i, upsample_block in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def forward( self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], fps: torch.Tensor, image_latents: torch.Tensor, image_embeddings: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[UNet3DConditionOutput, Tuple[torch.FloatTensor]]: r""" The [`I2VGenXLUNet`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, num_frames, channel, height, width`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. fps (`torch.Tensor`): Frames per second for the video being generated. Used as a "micro-condition". image_latents (`torch.FloatTensor`): Image encodings from the VAE. image_embeddings (`torch.FloatTensor`): Projection embeddings of the conditioning image computed with a vision encoder. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_3d_condition.UNet3DConditionOutput`] instead of a plain tuple. Returns: [`~models.unet_3d_condition.UNet3DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_3d_condition.UNet3DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ batch_size, channels, num_frames, height, width = sample.shape # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # 1. time timesteps = _to_tensor(timestep, sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. 
t_emb = t_emb.to(dtype=self.dtype) t_emb = self.time_embedding(t_emb, timestep_cond) # 2. FPS # broadcast to batch dimension in a way that's compatible with ONNX/Core ML fps = fps.expand(fps.shape[0]) fps_emb = self.fps_embedding(self.time_proj(fps).to(dtype=self.dtype)) # 3. time + FPS embeddings. emb = t_emb + fps_emb emb = emb.repeat_interleave(repeats=num_frames, dim=0) # 4. context embeddings. # The context embeddings consist of both text embeddings from the input prompt # AND the image embeddings from the input image. For images, both VAE encodings # and the CLIP image embeddings are incorporated. # The final `context_emb` provides the key/value sequence for cross-attention. context_emb = sample.new_zeros(batch_size, 0, self.config.cross_attention_dim) context_emb = torch.cat([context_emb, encoder_hidden_states], dim=1) image_latents_context_embs = _collapse_frames_into_batch(image_latents[:, :, :1, :]) image_latents_context_embs = self.image_latents_context_embedding(image_latents_context_embs) _batch_size, _channels, _height, _width = image_latents_context_embs.shape image_latents_context_embs = image_latents_context_embs.permute(0, 2, 3, 1).reshape( _batch_size, _height * _width, _channels ) context_emb = torch.cat([context_emb, image_latents_context_embs], dim=1) image_emb = self.context_embedding(image_embeddings) image_emb = image_emb.view(-1, self.config.in_channels, self.config.cross_attention_dim) context_emb = torch.cat([context_emb, image_emb], dim=1) context_emb = context_emb.repeat_interleave(repeats=num_frames, dim=0) image_latents = _collapse_frames_into_batch(image_latents) image_latents = self.image_latents_proj_in(image_latents) image_latents = ( image_latents[None, :] .reshape(batch_size, num_frames, channels, height, width) .permute(0, 3, 4, 1, 2) .reshape(batch_size * height * width, num_frames, channels) ) image_latents = self.image_latents_temporal_encoder(image_latents) image_latents = image_latents.reshape(batch_size, height, width, num_frames, channels).permute(0, 4, 3, 1, 2) # 5. pre-process sample = torch.cat([sample, image_latents], dim=1) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) sample = self.transformer_in( sample, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # 6. down down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=context_emb, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples # 7. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=context_emb, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) # 8. 
up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=context_emb, upsample_size=upsample_size, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, num_frames=num_frames, ) # 9. post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) # reshape to (batch, channel, num_frames, height, width) sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) if not return_dict: return (sample,) return UNet3DConditionOutput(sample=sample)
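# Editor's note: a hedged, self-contained check of the frame-collapsing
# convention used throughout this UNet (the toy tensor is an assumption for
# illustration). Latents of shape (batch, channels, num_frames, height, width)
# are folded into (batch * num_frames, channels, height, width) so that 2D
# layers such as `conv_in` run frame-wise; frame f of batch b lands at flat
# index b * num_frames + f. Because the module uses relative imports, run it
# with `python -m diffusers.models.unets.unet_i2vgen_xl`.
if __name__ == "__main__":
    video = torch.arange(2 * 3 * 4 * 2 * 2, dtype=torch.float32).reshape(2, 3, 4, 2, 2)
    flat = _collapse_frames_into_batch(video)
    assert flat.shape == (2 * 4, 3, 2, 2)
    # batch b=1, frame f=1 ends up at flat index 1 * 4 + 1 = 5
    assert torch.equal(flat[5], video[1, :, 1])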
# (end of file: diffusers/src/diffusers/models/unets/unet_i2vgen_xl.py)
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.fft as fft from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler >>> from diffusers.utils import export_to_gif >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") >>> pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False) >>> output = pipe(prompt="A corgi walking in the park") >>> frames = output.frames[0] >>> export_to_gif(frames, "animation.gif") ``` """ def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): batch_size, channels, num_frames, height, width = video.shape outputs = [] for batch_idx in range(batch_size): batch_vid = video[batch_idx].permute(1, 0, 2, 3) batch_output = processor.postprocess(batch_vid, output_type) outputs.append(batch_output) if output_type == "np": outputs = np.stack(outputs) elif output_type == "pt": outputs = torch.stack(outputs) elif not output_type == "pil": raise ValueError(f"{output_type} does not exist. 
Please choose one of ['np', 'pt', 'pil']") return outputs def _get_freeinit_freq_filter( shape: Tuple[int, ...], device: Union[str, torch.device], filter_type: str, order: float, spatial_stop_frequency: float, temporal_stop_frequency: float, ) -> torch.Tensor: r"""Returns the FreeInit filter based on filter type and other input conditions.""" T, H, W = shape[-3], shape[-2], shape[-1] mask = torch.zeros(shape) if spatial_stop_frequency == 0 or temporal_stop_frequency == 0: return mask if filter_type == "butterworth": def retrieve_mask(x): return 1 / (1 + (x / spatial_stop_frequency**2) ** order) elif filter_type == "gaussian": def retrieve_mask(x): return math.exp(-1 / (2 * spatial_stop_frequency**2) * x) elif filter_type == "ideal": def retrieve_mask(x): return 1 if x <= spatial_stop_frequency * 2 else 0 else: raise NotImplementedError("`filter_type` must be one of gaussian, butterworth or ideal") for t in range(T): for h in range(H): for w in range(W): d_square = ( ((spatial_stop_frequency / temporal_stop_frequency) * (2 * t / T - 1)) ** 2 + (2 * h / H - 1) ** 2 + (2 * w / W - 1) ** 2 ) mask[..., t, h, w] = retrieve_mask(d_square) return mask.to(device) def _freq_mix_3d(x: torch.Tensor, noise: torch.Tensor, LPF: torch.Tensor) -> torch.Tensor: r"""Noise reinitialization.""" # FFT x_freq = fft.fftn(x, dim=(-3, -2, -1)) x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1)) noise_freq = fft.fftn(noise, dim=(-3, -2, -1)) noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1)) # frequency mix HPF = 1 - LPF x_freq_low = x_freq * LPF noise_freq_high = noise_freq * HPF x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain # IFFT x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1)) x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real return x_mixed class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin): r""" Pipeline for text-to-video generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. motion_adapter ([`MotionAdapter`]): A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = ["feature_extractor", "image_encoder"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, motion_adapter: MotionAdapter, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds(self, ip_adapter_image, device, num_images_per_prompt): if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have the same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0) if self.do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) return image_embeds # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents batch_size, channels, num_frames, height, width = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = ( image[None, :] .reshape( ( batch_size, num_frames, -1, ) + image.shape[2:] ) .permute(0, 2, 1, 3, 4) ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.float() return video # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. The suffixes after the scaling factors represent the stages where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. 
Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ if not hasattr(self, "unet"): raise ValueError("The pipeline must have `unet` for using FreeU.") self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu def disable_freeu(self): """Disables the FreeU mechanism if enabled.""" self.unet.disable_freeu() @property def free_init_enabled(self): return hasattr(self, "_free_init_num_iters") and self._free_init_num_iters is not None def enable_free_init( self, num_iters: int = 3, use_fast_sampling: bool = False, method: str = "butterworth", order: int = 4, spatial_stop_frequency: float = 0.25, temporal_stop_frequency: float = 0.25, generator: torch.Generator = None, ): """Enables the FreeInit mechanism as in https://arxiv.org/abs/2312.07537. This implementation has been adapted from the [official repository](https://github.com/TianxingWu/FreeInit). Args: num_iters (`int`, *optional*, defaults to `3`): Number of FreeInit noise re-initialization iterations. use_fast_sampling (`bool`, *optional*, defaults to `False`): Whether or not to speed up the sampling procedure at the cost of probably lower quality results. Enables the "Coarse-to-Fine Sampling" strategy, as mentioned in the paper, if set to `True`. method (`str`, *optional*, defaults to `butterworth`): Must be one of `butterworth`, `ideal` or `gaussian` to use as the filtering method for the FreeInit low pass filter. order (`int`, *optional*, defaults to `4`): Order of the filter used in `butterworth` method. Larger values lead to `ideal` method behaviour whereas lower values lead to `gaussian` method behaviour. spatial_stop_frequency (`float`, *optional*, defaults to `0.25`): Normalized stop frequency for spatial dimensions. Must be between 0 and 1. Referred to as `d_s` in the original implementation. temporal_stop_frequency (`float`, *optional*, defaults to `0.25`): Normalized stop frequency for temporal dimensions. Must be between 0 and 1. Referred to as `d_t` in the original implementation. generator (`torch.Generator`, *optional*, defaults to `None`): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make FreeInit generation deterministic. 
""" self._free_init_num_iters = num_iters self._free_init_use_fast_sampling = use_fast_sampling self._free_init_method = method self._free_init_order = order self._free_init_spatial_stop_frequency = spatial_stop_frequency self._free_init_temporal_stop_frequency = temporal_stop_frequency self._free_init_generator = generator def disable_free_init(self): """Disables the FreeInit mechanism if enabled.""" self._free_init_num_iters = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def _denoise_loop( self, timesteps, num_inference_steps, do_classifier_free_guidance, guidance_scale, num_warmup_steps, prompt_embeds, negative_prompt_embeds, latents, cross_attention_kwargs, added_cond_kwargs, extra_step_kwargs, callback, callback_steps, callback_on_step_end, callback_on_step_end_tensor_inputs, ): """Denoising loop for AnimateDiff.""" with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) return latents def _free_init_loop( self, height, width, num_frames, num_channels_latents, batch_size, num_videos_per_prompt, denoise_args, device, ): """Denoising loop for AnimateDiff using FreeInit noise reinitialization technique.""" latents = denoise_args.get("latents") prompt_embeds = denoise_args.get("prompt_embeds") timesteps = denoise_args.get("timesteps") num_inference_steps = denoise_args.get("num_inference_steps") latent_shape = ( batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) free_init_filter_shape = ( 1, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) free_init_freq_filter = 
_get_freeinit_freq_filter( shape=free_init_filter_shape, device=device, filter_type=self._free_init_method, order=self._free_init_order, spatial_stop_frequency=self._free_init_spatial_stop_frequency, temporal_stop_frequency=self._free_init_temporal_stop_frequency, ) with self.progress_bar(total=self._free_init_num_iters) as free_init_progress_bar: for i in range(self._free_init_num_iters): # For the first FreeInit iteration, the original latent is used without modification. # Subsequent iterations apply the noise reinitialization technique. if i == 0: initial_noise = latents.detach().clone() else: current_diffuse_timestep = ( self.scheduler.config.num_train_timesteps - 1 ) # diffuse to t=999 noise level diffuse_timesteps = torch.full((batch_size,), current_diffuse_timestep).long() z_T = self.scheduler.add_noise( original_samples=latents, noise=initial_noise, timesteps=diffuse_timesteps.to(device) ).to(dtype=torch.float32) z_rand = randn_tensor( shape=latent_shape, generator=self._free_init_generator, device=device, dtype=torch.float32, ) latents = _freq_mix_3d(z_T, z_rand, LPF=free_init_freq_filter) latents = latents.to(prompt_embeds.dtype) # Coarse-to-Fine Sampling for faster inference (can lead to lower quality) if self._free_init_use_fast_sampling: current_num_inference_steps = int(num_inference_steps / self._free_init_num_iters * (i + 1)) self.scheduler.set_timesteps(current_num_inference_steps, device=device) timesteps = self.scheduler.timesteps denoise_args.update({"timesteps": timesteps, "num_inference_steps": current_num_inference_steps}) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order denoise_args.update({"latents": latents, "num_warmup_steps": num_warmup_steps}) latents = self._denoise_loop(**denoise_args) free_init_progress_bar.update() return latents def _retrieve_video_frames(self, latents, output_type, return_dict): """Helper function to handle latents to output conversion.""" if output_type == "latent": return AnimateDiffPipelineOutput(frames=latents) video_tensor = self.decode_latents(latents) video = tensor2vid(video_tensor, self.image_processor, output_type=output_type) if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        num_frames: Optional[int] = 16,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_videos_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated video.
            num_frames (`int`, *optional*, defaults to 16):
                The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
                amounts to 2 seconds of video.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to higher quality videos at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only
                applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`. Latents should be of
                shape `(batch_size, num_channel, num_frames, height, width)`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting).
                If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
                `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`]
                instead of a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep:
                int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated
                frames.
        """
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3.
Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, device, batch_size * num_videos_per_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps self._num_timesteps = len(timesteps) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order denoise_args = { "timesteps": timesteps, "num_inference_steps": num_inference_steps, "do_classifier_free_guidance": self.do_classifier_free_guidance, "guidance_scale": guidance_scale, "num_warmup_steps": num_warmup_steps, "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "latents": latents, "cross_attention_kwargs": self.cross_attention_kwargs, "added_cond_kwargs": added_cond_kwargs, "extra_step_kwargs": extra_step_kwargs, "callback": callback, "callback_steps": callback_steps, "callback_on_step_end": callback_on_step_end, "callback_on_step_end_tensor_inputs": callback_on_step_end_tensor_inputs, } if self.free_init_enabled: latents = self._free_init_loop( height=height, width=width, num_frames=num_frames, num_channels_latents=num_channels_latents, batch_size=batch_size, num_videos_per_prompt=num_videos_per_prompt, denoise_args=denoise_args, device=device, ) else: latents = self._denoise_loop(**denoise_args) video = self._retrieve_video_frames(latents, output_type, return_dict) # 9. Offload all models self.maybe_free_model_hooks() return video
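The FreeU and FreeInit switches defined earlier in this file are plain pipeline methods, so they can simply be toggled around a normal generation call. A minimal usage sketch follows; the checkpoint ids, the scheduler override, and the seed are illustrative assumptions rather than anything this file mandates.

```py
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

# Illustrative checkpoints; any AnimateDiff-compatible base model / motion adapter should work.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
pipe = AnimateDiffPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")

# FreeInit trades extra sampling passes for better temporal consistency.
pipe.enable_free_init(num_iters=3, method="butterworth", generator=torch.Generator("cuda").manual_seed(0))

output = pipe("a panda surfing a wave, ocean spray", num_frames=16, num_inference_steps=25)
export_to_gif(output.frames[0], "panda.gif")
pipe.disable_free_init()
```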
diffusers/src/diffusers/pipelines/animatediff/pipeline_animatediff.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/animatediff/pipeline_animatediff.py", "repo_id": "diffusers", "token_count": 22983 }
121
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["multicontrolnet"] = ["MultiControlNetModel"] _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
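Because `sys.modules[__name__]` is replaced with a `_LazyModule`, importing this subpackage stays cheap: the torch/transformers-heavy pipeline modules are only materialized on first attribute access, and when the backends are missing the same names resolve to dummy objects that raise an informative error on use. A small sketch of the observable behaviour:

```py
import diffusers.pipelines.controlnet as controlnet_pipelines

# The line above does not yet import the heavy pipeline modules; the attribute
# access below triggers the actual import via _LazyModule.__getattr__.
pipe_cls = controlnet_pipelines.StableDiffusionControlNetPipeline
print(pipe_cls.__name__)  # "StableDiffusionControlNetPipeline"
```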
diffusers/src/diffusers/pipelines/controlnet/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/__init__.py", "repo_id": "diffusers", "token_count": 1294 }
122
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = { "timesteps": [ "fast27_timesteps", "smart100_timesteps", "smart185_timesteps", "smart27_timesteps", "smart50_timesteps", "super100_timesteps", "super27_timesteps", "super40_timesteps", ] } try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_if"] = ["IFPipeline"] _import_structure["pipeline_if_img2img"] = ["IFImg2ImgPipeline"] _import_structure["pipeline_if_img2img_superresolution"] = ["IFImg2ImgSuperResolutionPipeline"] _import_structure["pipeline_if_inpainting"] = ["IFInpaintingPipeline"] _import_structure["pipeline_if_inpainting_superresolution"] = ["IFInpaintingSuperResolutionPipeline"] _import_structure["pipeline_if_superresolution"] = ["IFSuperResolutionPipeline"] _import_structure["pipeline_output"] = ["IFPipelineOutput"] _import_structure["safety_checker"] = ["IFSafetyChecker"] _import_structure["watermark"] = ["IFWatermarker"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_if import IFPipeline from .pipeline_if_img2img import IFImg2ImgPipeline from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .timesteps import ( fast27_timesteps, smart27_timesteps, smart50_timesteps, smart100_timesteps, smart185_timesteps, super27_timesteps, super40_timesteps, super100_timesteps, ) from .watermark import IFWatermarker else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
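The `timesteps` module exported above ships precomputed "smart" denoising schedules for the IF pipelines. A hedged sketch of plugging one into the first-stage pipeline follows; it assumes the IF pipelines accept a `timesteps` argument (as the exported schedules suggest) and uses the publicly documented `DeepFloyd/IF-I-XL-v1.0` checkpoint id.

```py
import torch
from diffusers import IFPipeline
from diffusers.pipelines.deepfloyd_if import fast27_timesteps

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

prompt_embeds, negative_embeds = pipe.encode_prompt("a photo of a red panda wearing a top hat")

# Use one of the precomputed schedules instead of a uniformly spaced one.
image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    timesteps=fast27_timesteps,
    output_type="pt",
).images
```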
diffusers/src/diffusers/pipelines/deepfloyd_if/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deepfloyd_if/__init__.py", "repo_id": "diffusers", "token_count": 1266 }
123
# Copyright 2022 The Music Spectrogram Diffusion Authors. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): @register_to_config def __init__( self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ): super().__init__() self.token_embedder = nn.Embedding(vocab_size, d_model) self.position_encoding = nn.Embedding(max_length, d_model) self.position_encoding.weight.requires_grad = False self.dropout_pre = nn.Dropout(p=dropout_rate) t5config = T5Config( vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, ) self.encoders = nn.ModuleList() for lyr_num in range(num_layers): lyr = T5Block(t5config) self.encoders.append(lyr) self.layer_norm = T5LayerNorm(d_model) self.dropout_post = nn.Dropout(p=dropout_rate) def forward(self, encoder_input_tokens, encoder_inputs_mask): x = self.token_embedder(encoder_input_tokens) seq_length = encoder_input_tokens.shape[1] inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device) x += self.position_encoding(inputs_positions) x = self.dropout_pre(x) # inverted the attention mask input_shape = encoder_input_tokens.size() extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) for lyr in self.encoders: x = lyr(x, extended_attention_mask)[0] x = self.layer_norm(x) return self.dropout_post(x), encoder_inputs_mask
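A quick smoke test for the encoder above: it embeds note tokens, adds (frozen) learned positions, and runs them through a stack of T5 encoder blocks. The hyperparameters below are illustrative, not the released checkpoint's configuration, and the import path is inferred from this record's file path.

```py
import torch
from diffusers.pipelines.deprecated.spectrogram_diffusion.notes_encoder import SpectrogramNotesEncoder

# Small illustrative config; real checkpoints define their own values.
encoder = SpectrogramNotesEncoder(
    max_length=256, vocab_size=1536, d_model=128, dropout_rate=0.1,
    num_layers=2, num_heads=4, d_kv=32, d_ff=256, feed_forward_proj="gated-gelu",
)
encoder.eval()

tokens = torch.randint(0, 1536, (1, 32))    # (batch, seq_len) note tokens
mask = torch.ones(1, 32, dtype=torch.long)  # 1 = attend, 0 = padding

with torch.no_grad():
    hidden_states, out_mask = encoder(tokens, mask)
print(hidden_states.shape)  # torch.Size([1, 32, 128])
```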
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py", "repo_id": "diffusers", "token_count": 1255 }
124
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import torch
import torch.utils.checkpoint
from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer

from ....image_processor import VaeImageProcessor
from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel
from ....schedulers import KarrasDiffusionSchedulers
from ....utils import deprecate, logging
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from .modeling_text_unet import UNetFlatConditionModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class VersatileDiffusionTextToImagePipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Versatile Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        image_unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        text_unet ([`UNetFlatConditionModel`]):
            A flat U-Net for the text branch; its attention blocks are swapped with those of `image_unet` for
            text-to-image generation.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be
            one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
""" model_cpu_offload_seq = "bert->unet->vqvae" tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModelWithProjection image_unet: UNet2DConditionModel text_unet: UNetFlatConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers _optional_components = ["text_unet"] def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, image_unet: UNet2DConditionModel, text_unet: UNetFlatConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if self.text_unet is not None: self._swap_unet_attention_blocks() def _swap_unet_attention_blocks(self): """ Swap the `Transformer2DModel` blocks between the image and text UNets """ for name, module in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): parent_name, index = name.rsplit(".", 1) index = int(index) self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = ( self.text_unet.get_submodule(parent_name)[index], self.image_unet.get_submodule(parent_name)[index], ) def remove_unused_weights(self): self.register_modules(text_unet=None) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" def normalize_embeddings(encoder_output): embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) embeds_pooled = encoder_output.text_embeds embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) return embeds batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = normalize_embeddings(prompt_embeds) # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. 
Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionTextToImagePipeline >>> import torch >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe.remove_unused_weights() >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0] >>> image.save("./astronaut.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
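As the deprecation notice in `decode_latents` indicates, the same latent-to-image conversion is done inline at the end of `__call__` using the VAE and the image processor. A minimal sketch of the replacement path, assuming `pipe` is an already-loaded `VersatileDiffusionTextToImagePipeline` and `latents` holds denoised latents:

```py
# Equivalent of the deprecated decode_latents(), built from the pipeline's own components:
image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
image = pipe.image_processor.postprocess(image, output_type="pil")
```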
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py", "repo_id": "diffusers", "token_count": 9852 }
125
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Dict, List, Optional, Union

import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection

from ...models import PriorTransformer, UNet2DConditionModel, VQModel
from ...schedulers import DDPMScheduler, UnCLIPScheduler
from ...utils import deprecate, logging, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline
from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TEXT2IMAGE_EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipe = AutoPipelineForText2Image.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipe.enable_model_cpu_offload()

        prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"

        image = pipe(prompt=prompt, num_inference_steps=25).images[0]
        ```
"""

IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import AutoPipelineForImage2Image
        import torch
        import requests
        from io import BytesIO
        from PIL import Image

        pipe = AutoPipelineForImage2Image.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipe.enable_model_cpu_offload()

        prompt = "A fantasy landscape, Cinematic lighting"
        negative_prompt = "low quality, bad quality"

        url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

        response = requests.get(url)
        original_image = Image.open(BytesIO(response.content)).convert("RGB")
        original_image.thumbnail((768, 768))

        image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]
        ```
"""

INPAINT_EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import AutoPipelineForInpainting
        from diffusers.utils import load_image
        import torch
        import numpy as np

        pipe = AutoPipelineForInpainting.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipe.enable_model_cpu_offload()

        prompt = "A fantasy landscape, Cinematic lighting"
        negative_prompt = "low quality, bad quality"

        original_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )

        mask = np.zeros((768, 768), dtype=np.float32)
        # Let's mask out an area above the cat's head
        mask[:250, 250:-250] = 1

        image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
        ```
"""


class KandinskyV22CombinedPipeline(DiffusionPipeline):
    """
    Combined Pipeline for text-to-image generation using Kandinsky

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ Decoder to generate the image from the latents.
        prior_prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen image-encoder.
        prior_text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        prior_tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        prior_scheduler ([`UnCLIPScheduler`]):
            A scheduler to be used in combination with `prior` to generate image embedding.
        prior_image_processor ([`CLIPImageProcessor`]):
            An image processor to be used to preprocess images for CLIP.
    """

    model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq"
    _load_connected_pipes = True

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
        prior_prior: PriorTransformer,
        prior_image_encoder: CLIPVisionModelWithProjection,
        prior_text_encoder: CLIPTextModelWithProjection,
        prior_tokenizer: CLIPTokenizer,
        prior_scheduler: UnCLIPScheduler,
        prior_image_processor: CLIPImageProcessor,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
            prior_prior=prior_prior,
            prior_image_encoder=prior_image_encoder,
            prior_text_encoder=prior_text_encoder,
            prior_tokenizer=prior_tokenizer,
            prior_scheduler=prior_scheduler,
            prior_image_processor=prior_image_processor,
        )
        self.prior_pipe = KandinskyV22PriorPipeline(
            prior=prior_prior,
            image_encoder=prior_image_encoder,
            text_encoder=prior_text_encoder,
            tokenizer=prior_tokenizer,
            scheduler=prior_scheduler,
            image_processor=prior_image_processor,
        )
        self.decoder_pipe = KandinskyV22Pipeline(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )

    def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
        self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
        Note that offloading happens on a submodule basis. Memory savings are higher than with
        `enable_model_cpu_offload`, but performance is lower.
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. prior_callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference of the prior pipeline. The function is called with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. prior_callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your prior pipeline class. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference of the decoder pipeline. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. 
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt outputs = self.decoder_pipe( image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self.maybe_free_model_hooks() return outputs class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for image-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonincal unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. prior_image_processor ([`CLIPImageProcessor`]): A image_processor to be used to preprocess image from clip. 
""" model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" _load_connected_pipes = True def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyV22PriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyV22Img2ImgPipeline( unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ self.prior_pipe.enable_model_cpu_offload() self.decoder_pipe.enable_model_cpu_offload() def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. 
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, *optional*, defaults to 0.3): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            prior_num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps in the prior. More denoising steps usually lead to a higher quality
                image at the expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference. The function is called with
                the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        prior_outputs = self.prior_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=prior_num_inference_steps,
            generator=generator,
            latents=latents,
            guidance_scale=prior_guidance_scale,
            output_type="pt",
            return_dict=False,
            callback_on_step_end=prior_callback_on_step_end,
            callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs,
        )
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]

        prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
        image = [image] if isinstance(image, PIL.Image.Image) else image

        if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
            prompt = (image_embeds.shape[0] // len(prompt)) * prompt

        if (
            isinstance(image, (list, tuple))
            and len(image) < image_embeds.shape[0]
            and image_embeds.shape[0] % len(image) == 0
        ):
            image = (image_embeds.shape[0] // len(image)) * image

        outputs = self.decoder_pipe(
            image=image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            width=width,
            height=height,
            strength=strength,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
            output_type=output_type,
            callback=callback,
            callback_steps=callback_steps,
            return_dict=return_dict,
            callback_on_step_end=callback_on_step_end,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )
        self.maybe_free_model_hooks()

        return outputs


class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
    """
    Combined Pipeline for inpainting generation using Kandinsky

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ Decoder to generate the image from the latents.
        prior_prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        prior_image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen image-encoder.
        prior_text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        prior_tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        prior_scheduler ([`UnCLIPScheduler`]):
            A scheduler to be used in combination with `prior` to generate image embedding.
        prior_image_processor ([`CLIPImageProcessor`]):
            An image processor to be used to preprocess images from CLIP.
""" model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" _load_connected_pipes = True def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyV22PriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyV22InpaintPipeline( unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], mask_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. 
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. Can also accept image latents as `image`; if passing latents directly, they will not be
                encoded again.
            mask_image (`np.array`):
                Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
                black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
                channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of
                3, so the expected shape would be `(B, H, W, 1)`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            prior_num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps in the prior. More denoising steps usually lead to a higher quality
                image at the expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            prior_callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during the inference.
                The function is called with the following arguments: `prior_callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`.
            prior_callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the
                list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during the inference. The function is
                called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int,
                timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as
                specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        prior_kwargs = {}
        if kwargs.get("prior_callback", None) is not None:
            prior_kwargs["callback"] = kwargs.pop("prior_callback")
            deprecate(
                "prior_callback",
                "1.0.0",
                "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider using"
                " `prior_callback_on_step_end`",
            )
        if kwargs.get("prior_callback_steps", None) is not None:
            deprecate(
                "prior_callback_steps",
                "1.0.0",
                "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider using"
                " `prior_callback_on_step_end`",
            )
            prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps")

        prior_outputs = self.prior_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            num_inference_steps=prior_num_inference_steps,
            generator=generator,
            latents=latents,
            guidance_scale=prior_guidance_scale,
            output_type="pt",
            return_dict=False,
            callback_on_step_end=prior_callback_on_step_end,
            callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs,
            **prior_kwargs,
        )
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]

        prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
        image = [image] if isinstance(image, PIL.Image.Image) else image
        mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image

        if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
            prompt = (image_embeds.shape[0] // len(prompt)) * prompt

        if (
            isinstance(image, (list, tuple))
            and len(image) < image_embeds.shape[0]
            and image_embeds.shape[0] % len(image) == 0
        ):
            image = (image_embeds.shape[0] // len(image)) * image

        if (
            isinstance(mask_image, (list, tuple))
            and len(mask_image) < image_embeds.shape[0]
            and image_embeds.shape[0] % len(mask_image) == 0
        ):
            mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image

        outputs = self.decoder_pipe(
            image=image,
            mask_image=mask_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
            output_type=output_type,
            return_dict=return_dict,
            callback_on_step_end=callback_on_step_end,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            **kwargs,
        )
self.maybe_free_model_hooks() return outputs
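For orientation, here is a minimal usage sketch for the combined pipelines defined in this file. It is not part of the module; the checkpoint id is an assumption based on the public `kandinsky-community` checkpoints, and the image URLs are hypothetical placeholders.

```py
# Hedged sketch: AutoPipelineForInpainting resolves to the inpaint combined pipeline above
# for this (assumed) checkpoint id; substitute whatever checkpoint you actually use.
import torch
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image

pipe = AutoPipelineForInpainting.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # offloads the prior and decoder sub-pipelines

init_image = load_image("https://example.com/dog.png")  # hypothetical URL
mask = load_image("https://example.com/dog_mask.png")  # white = repaint, black = keep

image = pipe(
    prompt="a red hat",
    image=init_image,
    mask_image=mask,
    num_inference_steps=50,
    prior_guidance_scale=4.0,
).images[0]
```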
diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py", "repo_id": "diffusers", "token_count": 18701 }
126
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        vqvae ([`VQModel`]):
            Vector-quantized (VQ) model to encode and decode images to and from latent representations.
        unet ([`UNet2DModel`]):
            A `UNet2DModel` to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerDiscreteScheduler`],
            [`EulerAncestralDiscreteScheduler`], [`DPMSolverMultistepScheduler`], or [`PNDMScheduler`].
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        r"""
        The call function to the pipeline for generation.

        Args:
            image (`torch.Tensor` or `PIL.Image.Image`):
                `Image` or tensor representing an image batch to be used as the starting point for the process.
            batch_size (`int`, *optional*, defaults to 1):
                Number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
Example: ```py >>> import requests >>> from PIL import Image >>> from io import BytesIO >>> from diffusers import LDMSuperResolutionPipeline >>> import torch >>> # load model and scheduler >>> pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages") >>> pipeline = pipeline.to("cuda") >>> # let's download an image >>> url = ( ... "https://user-images.githubusercontent.com/38061659/199705896-b48e17b8-b231-47cd-a270-4ffa5a93fa3e.png" ... ) >>> response = requests.get(url) >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB") >>> low_res_img = low_res_img.resize((128, 128)) >>> # run pipeline in inference (sample random noise and denoise) >>> upscaled_image = pipeline(low_res_img, num_inference_steps=100, eta=1).images[0] >>> # save image >>> upscaled_image.save("ldm_generated_image.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, torch.Tensor): batch_size = image.shape[0] else: raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}") if isinstance(image, PIL.Image.Image): image = preprocess(image) height, width = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width) latents_dtype = next(self.unet.parameters()).dtype latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) image = image.to(device=self.device, dtype=latents_dtype) # set timesteps and move to the correct device self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps_tensor = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_kwargs = {} if accepts_eta: extra_kwargs["eta"] = eta for t in self.progress_bar(timesteps_tensor): # concat latents and low resolution image in the channel dimension. latents_input = torch.cat([latents, image], dim=1) latents_input = self.scheduler.scale_model_input(latents_input, t) # predict the noise residual noise_pred = self.unet(latents_input, t).sample # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample # decode the image latents with the VQVAE image = self.vqvae.decode(latents).sample image = torch.clamp(image, -1.0, 1.0) image = image / 2 + 0.5 image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
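A quick, self-contained sanity check of the `preprocess` contract above (dimensions rounded down to a multiple of 32, pixel values mapped from [0, 255] to [-1, 1]); it assumes this module's `preprocess` is in scope.

```py
import numpy as np
import PIL.Image

# 70x100 all-white RGB image (PIL reports size as (width, height))
img = PIL.Image.fromarray(np.full((100, 70, 3), 255, dtype=np.uint8))
tensor = preprocess(img)

assert tensor.shape == (1, 3, 96, 64)  # height 100 -> 96, width 70 -> 64
assert float(tensor.min()) == 1.0  # all-white pixels map to exactly 1.0
```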
diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py", "repo_id": "diffusers", "token_count": 3451 }
127
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["camera"] = ["create_pan_cameras"] _import_structure["pipeline_shap_e"] = ["ShapEPipeline"] _import_structure["pipeline_shap_e_img2img"] = ["ShapEImg2ImgPipeline"] _import_structure["renderer"] = [ "BoundingBoxVolume", "ImportanceRaySampler", "MLPNeRFModelOutput", "MLPNeRSTFModel", "ShapEParamsProjModel", "ShapERenderer", "StratifiedRaySampler", "VoidNeRFModel", ] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, ) else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
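To make the lazy-import pattern above concrete: importing the package is cheap, and the real submodule import happens on first attribute access. A small sketch, assuming `diffusers`, `torch`, and `transformers` are installed (otherwise the dummy objects are substituted):

```py
import diffusers.pipelines.shap_e as shap_e

# The module object is a _LazyModule proxy until a name from _import_structure is touched.
print(type(shap_e).__name__)  # _LazyModule

pipeline_cls = shap_e.ShapEPipeline  # triggers the actual import of pipeline_shap_e
print(pipeline_cls.__module__)  # diffusers.pipelines.shap_e.pipeline_shap_e
```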
diffusers/src/diffusers/pipelines/shap_e/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/shap_e/__init__.py", "repo_id": "diffusers", "token_count": 939 }
128
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...utils import BaseOutput, is_flax_available


@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`):
            List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or
            `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.

        Args:
            images (`np.ndarray`):
                Denoised images of array shape of `(batch_size, height, width, num_channels)`.
            nsfw_content_detected (`List[bool]`):
                List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content
                or `None` if safety checking could not be performed.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]
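Because the dataclass above inherits from `BaseOutput`, it supports attribute, key, and tuple-style access interchangeably; a short sketch, assuming `diffusers` is installed:

```py
import numpy as np
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput

out = StableDiffusionPipelineOutput(
    images=np.zeros((1, 64, 64, 3), dtype=np.float32),
    nsfw_content_detected=[False],
)
assert out.images is out["images"]  # attribute and key access hit the same field
assert out[1] == [False]  # integer indexing goes through to_tuple()
```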
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_output.py", "repo_id": "diffusers", "token_count": 598 }
129
from dataclasses import dataclass
from typing import List, Union

import numpy as np
import torch

from ...utils import (
    BaseOutput,
)


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Args:
        frames (`List[np.ndarray]` or `torch.FloatTensor`):
            List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or
            as a `torch` tensor. The length of the list denotes the video length (the number of frames).
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
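The `frames` field above is straightforward to serialize with the helper that ships in `diffusers.utils`; a short sketch (assumes `opencv-python` is available, which `export_to_video` relies on):

```py
import numpy as np
from diffusers.utils import export_to_video

# 16 random RGB frames in [0, 1], shaped (height, width, num_channels) as documented above
frames = [np.random.rand(256, 256, 3).astype(np.float32) for _ in range(16)]
video_path = export_to_video(frames, output_video_path="sample.mp4")
```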
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py", "repo_id": "diffusers", "token_count": 221 }
130
# Copyright (c) 2023 Dominic Rampas MIT License # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__( self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=["CT", "CTA", "CTA", "CTA"], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1, ): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden) # CONDITIONING self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList( [ nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet)) ] ) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1), WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), ) def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0): if block_type == "C": return ResBlockStageB(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout) elif block_type == "A": return AttnBlock(c_hidden, c_cond, nhead, self_attn=True, dropout=dropout) elif block_type == "T": return TimestepBlock(c_hidden, c_r) else: raise ValueError(f"Block type {block_type} not supported") # BLOCKS # -- down blocks self.down_blocks = nn.ModuleList() for i in range(len(c_hidden)): down_block = nn.ModuleList() if i > 0: down_block.append( nn.Sequential( WuerstchenLayerNorm(c_hidden[i - 1], elementwise_affine=False, eps=1e-6), nn.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2), ) ) for _ in range(blocks[i]): for block_type in level_config[i]: c_skip = c_cond if inject_effnet[i] else 0 down_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) self.down_blocks.append(down_block) # -- up blocks self.up_blocks = nn.ModuleList() for i in reversed(range(len(c_hidden))): up_block = nn.ModuleList() for j in range(blocks[i]): for k, block_type in enumerate(level_config[i]): c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0 c_skip += c_cond if inject_effnet[i] else 0 up_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) if i > 0: up_block.append( nn.Sequential( WuerstchenLayerNorm(c_hidden[i], elementwise_affine=False, eps=1e-6), nn.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2), ) ) self.up_blocks.append(up_block) # OUTPUT self.clf = nn.Sequential( 
WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), nn.Conv2d(c_hidden[0], 2 * c_out * (patch_size**2), kernel_size=1), nn.PixelShuffle(patch_size), ) # --- WEIGHT INIT --- self.apply(self._init_weights) def _init_weights(self, m): # General init if isinstance(m, (nn.Conv2d, nn.Linear)): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) for mapper in self.effnet_mappers: if mapper is not None: nn.init.normal_(mapper.weight, std=0.02) # conditionings nn.init.normal_(self.clip_mapper.weight, std=0.02) # conditionings nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs nn.init.constant_(self.clf[1].weight, 0) # outputs # blocks for level_block in self.down_blocks + self.up_blocks: for block in level_block: if isinstance(block, ResBlockStageB): block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks)) elif isinstance(block, TimestepBlock): nn.init.constant_(block.mapper.weight, 0) def gen_r_embedding(self, r, max_positions=10000): r = r * max_positions half_dim = self.c_r // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.c_r % 2 == 1: # zero pad emb = nn.functional.pad(emb, (0, 1), mode="constant") return emb.to(dtype=r.dtype) def gen_c_embeddings(self, clip): clip = self.clip_mapper(clip) clip = self.seq_norm(clip) return clip def _down_encode(self, x, r_embed, effnet, clip=None): level_outputs = [] for i, down_block in enumerate(self.down_blocks): effnet_c = None for block in down_block: if isinstance(block, ResBlockStageB): if effnet_c is None and self.effnet_mappers[i] is not None: dtype = effnet.dtype effnet_c = self.effnet_mappers[i]( nn.functional.interpolate( effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True ).to(dtype) ) skip = effnet_c if self.effnet_mappers[i] is not None else None x = block(x, skip) elif isinstance(block, AttnBlock): x = block(x, clip) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) level_outputs.insert(0, x) return level_outputs def _up_decode(self, level_outputs, r_embed, effnet, clip=None): x = level_outputs[0] for i, up_block in enumerate(self.up_blocks): effnet_c = None for j, block in enumerate(up_block): if isinstance(block, ResBlockStageB): if effnet_c is None and self.effnet_mappers[len(self.down_blocks) + i] is not None: dtype = effnet.dtype effnet_c = self.effnet_mappers[len(self.down_blocks) + i]( nn.functional.interpolate( effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True ).to(dtype) ) skip = level_outputs[i] if j == 0 and i > 0 else None if effnet_c is not None: if skip is not None: skip = torch.cat([skip, effnet_c], dim=1) else: skip = effnet_c x = block(x, skip) elif isinstance(block, AttnBlock): x = block(x, clip) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) return x def forward(self, x, r, effnet, clip=None, x_cat=None, eps=1e-3, return_noise=True): if x_cat is not None: x = torch.cat([x, x_cat], dim=1) # Process the conditioning embeddings r_embed = self.gen_r_embedding(r) if clip is not None: clip = self.gen_c_embeddings(clip) # Model Blocks x_in = x x = self.embedding(x) level_outputs = self._down_encode(x, r_embed, effnet, clip) x = self._up_decode(level_outputs, r_embed, effnet, clip) a, b = self.clf(x).chunk(2, dim=1) b = b.sigmoid() * (1 - eps * 2) + eps if 
return_noise: return (x_in - a) / b else: return a, b class ResBlockStageB(nn.Module): def __init__(self, c, c_skip=None, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(c + c_skip, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c), ) def forward(self, x, x_skip=None): x_res = x x = self.norm(self.depthwise(x)) if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x + x_res
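As a reference for the shape contract of `gen_r_embedding` above, here is the same sinusoidal timestep embedding as a standalone function: a batch of timestep ratios of shape `(B,)` becomes an embedding of shape `(B, c_r)`.

```py
import math

import torch


def sinusoidal_embedding(r: torch.Tensor, c_r: int = 64, max_positions: int = 10000) -> torch.Tensor:
    # mirrors WuerstchenDiffNeXt.gen_r_embedding: log-spaced frequencies, sin/cos halves
    r = r * max_positions
    half_dim = c_r // 2
    emb = math.log(max_positions) / (half_dim - 1)
    emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp()
    emb = r[:, None] * emb[None, :]
    emb = torch.cat([emb.sin(), emb.cos()], dim=1)
    if c_r % 2 == 1:  # zero pad to reach c_r when it is odd
        emb = torch.nn.functional.pad(emb, (0, 1), mode="constant")
    return emb


assert sinusoidal_embedding(torch.rand(4), c_r=64).shape == (4, 64)
```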
diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py", "repo_id": "diffusers", "token_count": 5545 }
131
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities.
        alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
            Choose from `cosine` or `exp`.

    Returns:
        betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
def rescale_zero_terminal_snr(betas):
    """
    Rescales betas to have zero terminal SNR, based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1).

    Args:
        betas (`torch.FloatTensor`):
            the betas that the scheduler is being initialized with.
    Returns:
        `torch.FloatTensor`: rescaled betas with zero terminal SNR
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= alphas_bar_sqrt_T

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt to betas
    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
    alphas = alphas_bar[1:] / alphas_bar[:-1]  # Revert cumprod
    alphas = torch.cat([alphas_bar[0:1], alphas])
    betas = 1 - alphas

    return betas


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    `DDIMInverseScheduler` is the reverse scheduler of [`DDIMScheduler`].

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
    generic methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        beta_start (`float`, defaults to 0.0001):
            The starting `beta` value of inference.
        beta_end (`float`, defaults to 0.02):
            The final `beta` value.
        beta_schedule (`str`, defaults to `"linear"`):
            The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
        clip_sample (`bool`, defaults to `True`):
            Clip the predicted sample for numerical stability.
        clip_sample_range (`float`, defaults to 1.0):
            The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
        set_alpha_to_one (`bool`, defaults to `True`):
            Each diffusion step uses the alphas product value at that step and at the previous one. For the initial
            step there is no current alpha. When this option is `True` the initial alpha product is fixed to `1`,
            otherwise it uses the alpha value at step 0.
        steps_offset (`int`, defaults to 0):
            An offset added to the inference steps. You can use a combination of `offset=1` and
            `set_alpha_to_one=False` to make the last step use `num_train_timesteps - 1` for the previous alpha
            product.
        prediction_type (`str`, defaults to `epsilon`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
            `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
            Video](https://imagen.research.google/video/paper.pdf) paper).
        timestep_spacing (`str`, defaults to `"leading"`):
            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        rescale_betas_zero_snr (`bool`, defaults to `False`):
            Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
            dark samples instead of limiting it to samples with medium brightness. Loosely related to
            [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
""" order = 1 ignore_for_config = ["kwargs"] _deprecated_kwargs = ["set_alpha_to_zero"] @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, timestep_spacing: str = "leading", rescale_betas_zero_snr: bool = False, **kwargs, ): if kwargs.get("set_alpha_to_zero", None) is not None: deprecation_message = ( "The `set_alpha_to_zero` argument is deprecated. Please use `set_alpha_to_one` instead." ) deprecate("set_alpha_to_zero", "1.0.0", deprecation_message, standard_warn=False) set_alpha_to_one = kwargs["set_alpha_to_zero"] if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the initial step, there is no current alphas_cumprod, and the index is out of bounds # `set_alpha_to_one` decides whether we set this parameter simply to one # in this case, self.step() just output the predicted noise # or whether we use the initial alpha used in training the diffusion model. self.initial_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64)) # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. """ if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." 
            )

        self.num_inference_steps = num_inference_steps

        # "leading" and "trailing" corresponds to annotation of Table 1. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "leading":
            step_ratio = self.config.num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = self.config.num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)[::-1]).astype(np.int64)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or"
                " 'trailing'."
            )

        self.timesteps = torch.from_numpy(timesteps).to(device)

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from the learned diffusion model.
            timestep (`int`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.scheduling_ddim_inverse.DDIMSchedulerOutput`] or `tuple`.

        Returns:
            [`~schedulers.scheduling_ddim_inverse.DDIMSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_ddim_inverse.DDIMSchedulerOutput`] is returned,
                otherwise a tuple is returned where the first element is the sample tensor.
        """
        # 1. get previous step value (=t+1)
        prev_timestep = timestep
        timestep = min(
            timestep - self.config.num_train_timesteps // self.num_inference_steps, self.config.num_train_timesteps - 1
        )

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep] if timestep >= 0 else self.initial_alpha_cumprod
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep]

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
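A minimal loop sketch for the inverse scheduler above. The model call is a zero-tensor stand-in, so this only demonstrates the timestep and `initial_alpha_cumprod` bookkeeping; in practice you would plug in a UNet's noise prediction conditioned on your prompt embeddings.

```py
import torch
from diffusers import DDIMInverseScheduler

inverse_scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
inverse_scheduler.set_timesteps(50)

latents = torch.randn(1, 4, 64, 64)  # stand-in for encoded image latents
for t in inverse_scheduler.timesteps:
    noise_pred = torch.zeros_like(latents)  # stand-in for unet(latents, t).sample
    latents = inverse_scheduler.step(noise_pred, t, latents).prev_sample
```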
diffusers/src/diffusers/schedulers/scheduling_ddim_inverse.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddim_inverse.py", "repo_id": "diffusers", "token_count": 7389 }
132
# Copyright 2023 Zhejiang University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    A fourth-order Improved Pseudo Linear Multistep scheduler.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
    generic methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
        self._step_index = None
        self._begin_index = None

    @property
    def step_index(self):
        """
        The index counter for the current timestep. It will increase by 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
""" self.num_inference_steps = num_inference_steps steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1] steps = torch.cat([steps, torch.tensor([0.0])]) if self.config.trained_betas is not None: self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32) else: self.betas = torch.sin(steps * math.pi / 2) ** 2 self.alphas = (1.0 - self.betas**2) ** 0.5 timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1] self.timesteps = timesteps.to(device) self.ets = [] self._step_index = None self._begin_index = None # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) pos = 1 if len(indices) > 1 else 0 return indices[pos].item() # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the linear multistep method. It performs one forward pass multiple times to approximate the solution. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) timestep_index = self.step_index prev_timestep_index = self.step_index + 1 ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(ets) if len(self.ets) == 1: ets = self.ets[-1] elif len(self.ets) == 2: ets = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets) # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets): alpha = self.alphas[timestep_index] sigma = self.betas[timestep_index] next_alpha = self.alphas[prev_timestep_index] next_sigma = self.betas[prev_timestep_index] pred = (sample - sigma * ets) / max(alpha, 1e-8) prev_sample = next_alpha * pred + ets * next_sigma return prev_sample def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_ipndm.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ipndm.py", "repo_id": "diffusers", "token_count": 3645 }
133
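A hedged usage sketch of the scheduler above: the constant-zero model output is a placeholder for a trained denoiser, so the loop only exercises the multistep bookkeeping, not a real generation.

import torch

from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler()
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for a real noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample

# after the first few steps, `scheduler.ets` holds the history consumed by the
# fourth-order Adams-Bashforth combination inside `step`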
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from huggingface_hub.utils import validate_hf_hub_args from ..utils import BaseOutput, PushToHubMixin SCHEDULER_CONFIG_NAME = "scheduler_config.json" # NOTE: We make this type an enum because it simplifies usage in docs and prevents # circular imports when used for `_compatibles` within the schedulers module. # When it's used as a type in pipelines, it really is a Union because the actual # scheduler instance is passed in. class FlaxKarrasDiffusionSchedulers(Enum): FlaxDDIMScheduler = 1 FlaxDDPMScheduler = 2 FlaxPNDMScheduler = 3 FlaxLMSDiscreteScheduler = 4 FlaxDPMSolverMultistepScheduler = 5 FlaxEulerDiscreteScheduler = 6 @dataclass class FlaxSchedulerOutput(BaseOutput): """ Base class for the scheduler's step function output. Args: prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. """ prev_sample: jnp.ndarray class FlaxSchedulerMixin(PushToHubMixin): """ Mixin containing common functions for the schedulers. Class attributes: - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that `from_config` can be used from a class different than the one used to save the config (should be overridden by parent class). """ config_name = SCHEDULER_CONFIG_NAME ignore_for_config = ["dtype"] _compatibles = [] has_compatibles = True @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs, ): r""" Instantiate a Scheduler class from a pre-defined JSON-file. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an organization name, like `google/ddpm-celebahq-256`. - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`. subfolder (`str`, *optional*): In case the relevant files are located inside a subfolder of the model repo (either remote in huggingface.co or downloaded locally), you can specify the folder name here. return_unused_kwargs (`bool`, *optional*, defaults to `False`): Whether kwargs that are not consumed by the Python class should be returned or not. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> """ config, kwargs = cls.load_config( pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, ) scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs) if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False): state = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the [`~FlaxSchedulerMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
""" self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) @property def compatibles(self): """ Returns all schedulers that are compatible with this scheduler Returns: `List[SchedulerMixin]`: List of compatible schedulers """ return self._get_compatibles() @classmethod def _get_compatibles(cls): compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) diffusers_library = importlib.import_module(__name__.split(".")[0]) compatible_classes = [ getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) ] return compatible_classes def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray: assert len(shape) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape) def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray: """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. Returns: betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs """ def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return jnp.array(betas, dtype=dtype) @flax.struct.dataclass class CommonSchedulerState: alphas: jnp.ndarray betas: jnp.ndarray alphas_cumprod: jnp.ndarray @classmethod def create(cls, scheduler): config = scheduler.config if config.trained_betas is not None: betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype) elif config.beta_schedule == "linear": betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
betas = ( jnp.linspace( config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype) else: raise NotImplementedError( f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" ) alphas = 1.0 - betas alphas_cumprod = jnp.cumprod(alphas, axis=0) return cls( alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, ) def get_sqrt_alpha_prod( state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray ): alphas_cumprod = state.alphas_cumprod sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def add_noise_common( state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray ): sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
diffusers/src/diffusers/schedulers/scheduling_utils_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_utils_flax.py", "repo_id": "diffusers", "token_count": 5031 }
134
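The `betas_for_alpha_bar` helper above discretizes the Glide cosine schedule. Here is a minimal standalone recomputation with `jax.numpy`, assuming only that JAX is installed; the step count is shortened for illustration.

import math

import jax.numpy as jnp


def alpha_bar(t):
    # cumulative product of (1 - beta) as a function of continuous time t in [0, 1]
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2


num_diffusion_timesteps = 8
betas = jnp.array(
    [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), 0.999)
        for i in range(num_diffusion_timesteps)
    ]
)

# mirrors CommonSchedulerState.create for the "squaredcos_cap_v2" branch
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)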
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "k_diffusion"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "k_diffusion"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "k_diffusion"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "k_diffusion"]) class StableDiffusionXLKDiffusionPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "k_diffusion"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "k_diffusion"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "k_diffusion"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "k_diffusion"])
diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py", "repo_id": "diffusers", "token_count": 451 }
135
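The autogenerated placeholders above all follow the same `DummyObject` pattern. A small hand-written sketch of the mechanism: any use of the class raises an `ImportError` via `requires_backends` when the listed backend is missing.

from diffusers.utils import DummyObject, requires_backends


class NeedsKDiffusion(metaclass=DummyObject):
    _backends = ["k_diffusion"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["k_diffusion"])


try:
    NeedsKDiffusion()
except ImportError as err:
    # raised only when k_diffusion is not installed; the message suggests a pip install
    print(err)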
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with package versions
"""

import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of the dependency versions, using the exact same syntax used by pip.

    The installed module version comes from the *site-packages* dir via *importlib.metadata*.

    Args:
        requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
        hint (`str`, *optional*): what suggestion to print in case of requirements not being met

    Example:

    ```python
    require_version("pandas>1.1.2")
    require_version("numpy>1.18.5", "this is important to have for whatever reason")
    ```"""

    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
        wanted = {}  # no version constraints to check
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. \n{hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: `pip install diffusers -U` or `pip install -e '.[dev]'` if you're working with git main"
    return require_version(requirement, hint)
diffusers/src/diffusers/utils/versions.py/0
{ "file_path": "diffusers/src/diffusers/utils/versions.py", "repo_id": "diffusers", "token_count": 1699 }
136
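A few hedged examples of the pip-style syntax `require_version` accepts, including a comma-separated range, which the parser above splits on ","; each call raises if the installed version does not satisfy the constraint, and the version numbers below are arbitrary choices for illustration.

from diffusers.utils.versions import require_version

require_version("numpy")            # presence-only check, no version constraint
require_version("torch>=1.4,<3.0")  # two constraints parsed from one requirement
require_version("python>=3.8", "this code path relies on a modern interpreter")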
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from torch import nn from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU from diffusers.models.embeddings import get_timestep_embedding from diffusers.models.lora import LoRACompatibleLinear from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from diffusers.models.transformers.transformer_2d import Transformer2DModel from diffusers.utils.testing_utils import ( backend_manual_seed, require_torch_accelerator_with_fp64, torch_device, ) class EmbeddingsTests(unittest.TestCase): def test_timestep_embeddings(self): embedding_dim = 256 timesteps = torch.arange(16) t1 = get_timestep_embedding(timesteps, embedding_dim) # first vector should always be composed only of 0's and 1's assert (t1[0, : embedding_dim // 2] - 0).abs().sum() < 1e-5 assert (t1[0, embedding_dim // 2 :] - 1).abs().sum() < 1e-5 # last element of each vector should be one assert (t1[:, -1] - 1).abs().sum() < 1e-5 # For large embeddings (e.g. 128) the frequency of every vector is higher # than the previous one which means that the gradients of later vectors are # ALWAYS higher than the previous ones grad_mean = np.abs(np.gradient(t1, axis=-1)).mean(axis=1) prev_grad = 0.0 for grad in grad_mean: assert grad > prev_grad prev_grad = grad def test_timestep_defaults(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim) t2 = get_timestep_embedding( timesteps, embedding_dim, flip_sin_to_cos=False, downscale_freq_shift=1, max_period=10_000 ) assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3) def test_timestep_flip_sin_cos(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=True) t1 = torch.cat([t1[:, embedding_dim // 2 :], t1[:, : embedding_dim // 2]], dim=-1) t2 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=False) assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3) def test_timestep_downscale_freq_shift(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0) t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1) # get cosine half (vectors that are wrapped into cosine) cosine_half = (t1 - t2)[:, embedding_dim // 2 :] # cosine needs to be negative assert (np.abs((cosine_half <= 0).numpy()) - 1).sum() < 1e-5 def test_sinoid_embeddings_hardcoded(self): embedding_dim = 64 timesteps = torch.arange(128) # standard unet, score_vde t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1, flip_sin_to_cos=False) # glide, ldm t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0, flip_sin_to_cos=True) # grad-tts t3 = get_timestep_embedding(timesteps, embedding_dim, scale=1000) assert torch.allclose( t1[23:26, 47:50].flatten().cpu(), torch.tensor([0.9646, 0.9804, 0.9892, 0.9615, 0.9787, 0.9882, 
0.9582, 0.9769, 0.9872]), 1e-3, ) assert torch.allclose( t2[23:26, 47:50].flatten().cpu(), torch.tensor([0.3019, 0.2280, 0.1716, 0.3146, 0.2377, 0.1790, 0.3272, 0.2474, 0.1864]), 1e-3, ) assert torch.allclose( t3[23:26, 47:50].flatten().cpu(), torch.tensor([-0.9801, -0.9464, -0.9349, -0.3952, 0.8887, -0.9709, 0.5299, -0.2853, -0.9927]), 1e-3, ) class Upsample2DBlockTests(unittest.TestCase): def test_upsample_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=False) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_conv(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=True) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.7145, 1.3773, 0.3492, 0.8448, 1.0839, -0.3341, 0.5956, 0.1250, -0.4841]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_conv_out_dim(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=True, out_channels=64) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 64, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.2703, 0.1656, -0.2538, -0.0553, -0.2984, 0.1044, 0.1155, 0.2579, 0.7755]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_transpose(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=False, use_conv_transpose=True) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.3028, -0.1582, 0.0071, 0.0350, -0.4799, -0.1139, 0.1056, -0.1153, -0.1046]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class Downsample2DBlockTests(unittest.TestCase): def test_downsample_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=False) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.0513, -0.3889, 0.0640, 0.0836, -0.5460, -0.0341, -0.0169, -0.6967, 0.1179]) max_diff = (output_slice.flatten() - expected_slice).abs().sum().item() assert max_diff <= 1e-3 # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1) def test_downsample_with_conv(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913], ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_downsample_with_conv_pad1(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True, padding=1) with 
torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_downsample_with_conv_out_dim(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True, out_channels=16) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 16, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class ResnetBlock2DTests(unittest.TestCase): def test_resnet_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 64, 64) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9010, -0.2974, -0.8245, -1.3533, 0.8742, -0.9645, -2.0584, 1.3387, -0.4746], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_use_in_shortcut(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, use_in_shortcut=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 64, 64) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.2226, -1.0791, -0.1629, 0.3659, -0.2889, -1.2376, 0.0582, 0.9206, 0.0044], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_resnet_up(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, up=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 128, 128) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [1.2130, -0.8753, -0.9027, 1.5783, -0.5362, -0.5001, 1.0726, -0.7732, -0.4182], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_resnet_down(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_kernel_fir(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="fir", down=True).to(torch_device) with 
torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.0934, -0.5729, 0.0909, -0.2710, -0.5044, 0.0243, -0.0665, -0.5267, -0.3136], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_kernel_sde_vp(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="sde_vp", down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class Transformer2DModelTests(unittest.TestCase): def test_spatial_transformer_default(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 32, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=32, num_attention_heads=1, attention_head_dim=32, dropout=0.0, cross_attention_dim=None, ).to(torch_device) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, 32, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9455, -0.0066, -1.3933, -1.5878, 0.5325, -0.6486, -1.8648, 0.7515, -0.9689], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_cross_attention_dim(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 64, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=64, num_attention_heads=2, attention_head_dim=32, dropout=0.0, cross_attention_dim=64, ).to(torch_device) with torch.no_grad(): context = torch.randn(1, 4, 64).to(torch_device) attention_scores = spatial_transformer_block(sample, context).sample assert attention_scores.shape == (1, 64, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.0143, -0.6909, -2.1547, -1.8893, 1.4097, 0.1359, -0.2521, -1.3359, 0.2598], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_timestep(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_embeds_ada_norm = 5 sample = torch.randn(1, 64, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=64, num_attention_heads=2, attention_head_dim=32, dropout=0.0, cross_attention_dim=64, num_embeds_ada_norm=num_embeds_ada_norm, ).to(torch_device) with torch.no_grad(): timestep_1 = torch.tensor(1, dtype=torch.long).to(torch_device) timestep_2 = torch.tensor(2, dtype=torch.long).to(torch_device) attention_scores_1 = spatial_transformer_block(sample, timestep=timestep_1).sample attention_scores_2 = spatial_transformer_block(sample, timestep=timestep_2).sample assert attention_scores_1.shape == (1, 64, 64, 64) assert attention_scores_2.shape == (1, 64, 64, 64) output_slice_1 = attention_scores_1[0, -1, -3:, -3:] output_slice_2 = attention_scores_2[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3923, -1.0923, -1.7144, -1.5570, 1.4154, 0.1738, -0.1157, -1.2998, -0.1703], 
device=torch_device ) expected_slice_2 = torch.tensor( [-0.4311, -1.1376, -1.7732, -1.5997, 1.3450, 0.0964, -0.1569, -1.3590, -0.2348], device=torch_device ) assert torch.allclose(output_slice_1.flatten(), expected_slice, atol=1e-3) assert torch.allclose(output_slice_2.flatten(), expected_slice_2, atol=1e-3) def test_spatial_transformer_dropout(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 32, 64, 64).to(torch_device) spatial_transformer_block = ( Transformer2DModel( in_channels=32, num_attention_heads=2, attention_head_dim=16, dropout=0.3, cross_attention_dim=None, ) .to(torch_device) .eval() ) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, 32, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9380, -0.0083, -1.3771, -1.5819, 0.5209, -0.6441, -1.8545, 0.7563, -0.9615], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) @require_torch_accelerator_with_fp64 def test_spatial_transformer_discrete(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_embed = 5 sample = torch.randint(0, num_embed, (1, 32)).to(torch_device) spatial_transformer_block = ( Transformer2DModel( num_attention_heads=1, attention_head_dim=32, num_vector_embeds=num_embed, sample_size=16, ) .to(torch_device) .eval() ) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, num_embed - 1, 32) output_slice = attention_scores[0, -2:, -3:] expected_slice = torch.tensor([-1.7648, -1.0241, -2.0985, -1.8035, -1.6404, -1.2098], device=torch_device) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_default_norm_layers(self): spatial_transformer_block = Transformer2DModel(num_attention_heads=1, attention_head_dim=32, in_channels=32) assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == nn.LayerNorm assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm def test_spatial_transformer_ada_norm_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, num_embeds_ada_norm=5, ) assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == AdaLayerNorm assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm def test_spatial_transformer_default_ff_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, ) assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == GEGLU assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear dim = 32 inner_dim = 128 # First dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim # NOTE: inner_dim * 2 because GEGLU assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim * 2 # Second dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim def test_spatial_transformer_geglu_approx_ff_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, 
in_channels=32, activation_fn="geglu-approximate", ) assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == ApproximateGELU assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear dim = 32 inner_dim = 128 # First dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim # Second dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim def test_spatial_transformer_attention_bias(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, attention_bias=True ) assert spatial_transformer_block.transformer_blocks[0].attn1.to_q.bias is not None assert spatial_transformer_block.transformer_blocks[0].attn1.to_k.bias is not None assert spatial_transformer_block.transformer_blocks[0].attn1.to_v.bias is not None
diffusers/tests/models/test_layers_utils.py/0
{ "file_path": "diffusers/tests/models/test_layers_utils.py", "repo_id": "diffusers", "token_count": 10696 }
137
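The first embedding test above pins down a useful invariant of the sinusoidal timestep embedding. A standalone check, using the same `get_timestep_embedding` import as the test file:

import torch

from diffusers.models.embeddings import get_timestep_embedding

emb = get_timestep_embedding(
    torch.arange(4), embedding_dim=8, flip_sin_to_cos=False, downscale_freq_shift=1
)
print(emb.shape)  # torch.Size([4, 8])
# at timestep 0 the sin half is all zeros and the cos half is all ones
assert emb[0, :4].abs().sum() < 1e-5
assert (emb[0, 4:] - 1).abs().sum() < 1e-5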
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers") class CheckDummiesTester(unittest.TestCase): def test_find_backend(self): simple_backend = find_backend(" if not is_torch_available():") self.assertEqual(simple_backend, "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(double_backend, "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") triple_backend = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(triple_backend, "torch_and_transformers_and_onnx") def test_read_init(self): objects = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", objects) self.assertIn("torch_and_transformers", objects) self.assertIn("flax_and_transformers", objects) self.assertIn("torch_and_transformers_and_onnx", objects) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel", objects["torch"]) self.assertIn("FlaxUNet2DConditionModel", objects["flax"]) self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"]) def test_create_dummy_object(self): dummy_constant = create_dummy_object("CONSTANT", "'torch'") self.assertEqual(dummy_constant, "\nCONSTANT = None\n") dummy_function = create_dummy_object("function", "'torch'") self.assertEqual( dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) expected_dummy_class = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ dummy_class = create_dummy_object("FakeClass", "'torch'") self.assertEqual(dummy_class, expected_dummy_class) def 
test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
diffusers/tests/others/test_check_dummies.py/0
{ "file_path": "diffusers/tests/others/test_check_dummies.py", "repo_id": "diffusers", "token_count": 1873 }
138
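The naming convention the checker relies on, shown standalone. This sketch assumes it runs from inside a diffusers checkout with the repo's `utils/` directory on `sys.path`, exactly as the test above arranges; the relative path below is a placeholder to adjust.

import os
import sys

# assumption: running from the parent of a diffusers checkout; adjust as needed
sys.path.append(os.path.join("diffusers", "utils"))

from check_dummies import find_backend

print(find_backend("    if not is_torch_available():"))
# -> "torch"
print(find_backend("    if not (is_torch_available() and is_transformers_available()):"))
# -> "torch_and_transformers"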
import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffVideoToVideoPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, UNet2DConditionModel, UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffVideoToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AnimateDiffVideoToVideoPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = VIDEO_TO_VIDEO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=(32, 64), motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) video_height = 32 video_width = 32 video_num_frames = 2 video = [Image.new("RGB", (video_width, video_height))] * video_num_frames inputs = { "video": video, "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffVideoToVideoPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if 
hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(torch_dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device) pipe(**inputs) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py/0
{ "file_path": "diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py", "repo_id": "diffusers", "token_count": 4546 }
139
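The batchification logic in `test_inference_batch_single_identical` above is easy to miss inside the test scaffolding. A minimal standalone sketch of the pattern, with hypothetical inputs: entries listed in `batch_params` are replicated per batch element, while scalar arguments are copied over unchanged.

batch_size = 2
inputs = {"prompt": "a photo of a cat", "num_inference_steps": 2}
batch_params = {"prompt"}  # entries that get replicated per batch element

batched_inputs = dict(inputs)
for name in batch_params:
    if name in inputs:
        batched_inputs[name] = batch_size * [inputs[name]]

# scalar arguments such as num_inference_steps stay as-is
assert batched_inputs == {"prompt": ["a photo of a cat"] * 2, "num_inference_steps": 2}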
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLImg2ImgFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, skip_first_text_encoder=False): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below 
hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): controlnet_embedder_scale_factor = 2 image = floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": image, } return inputs def test_stable_diffusion_xl_controlnet_img2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_controlnet_img2img_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] assert output.images.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = 
self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py", "repo_id": "diffusers", "token_count": 5979 }
140
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DiTPipeline params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) transformer = Transformer2DModel( sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, ) vae = AutoencoderKL() scheduler = DDIMScheduler() components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3)) expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @nightly @require_torch_gpu class DiTPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dit_256(self): generator = torch.manual_seed(0) pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256") pipe.to("cuda") words = 
["vase", "umbrella", "white shark", "white wolf"] ids = pipe.get_label_ids(words) images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images for word, image in zip(words, images): expected_image = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max()) < 1e-2 def test_dit_512(self): pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") words = ["vase", "umbrella"] ids = pipe.get_label_ids(words) generator = torch.manual_seed(0) images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images for word, image in zip(words, images): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max()) < 1e-1
diffusers/tests/pipelines/dit/test_dit.py/0
{ "file_path": "diffusers/tests/pipelines/dit/test_dit.py", "repo_id": "diffusers", "token_count": 2339 }
141
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, is_flaky, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = 
Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create mask mask = np.zeros((64, 64), dtype=np.float32) mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintPipeline params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds", "masked_image", "mask_image"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_inpaint(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @is_flaky() def test_model_cpu_offload_forward_pass(self): super().test_inference_batch_single_identical(expected_max_diff=8e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) # override default test because we need to zero out mask too in order to make sure final latent is all zero def test_callback_inputs(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v)
self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_inpaint(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) mask[:250, 250:-250] = 1 prompt = "a hat" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22InpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py", "repo_id": "diffusers", "token_count": 5423 }
142
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import ( floats_tensor, is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): # TODO: is there an appropriate internal test set? hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx" def get_dummy_inputs(self, seed=0): image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddpm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6957, 0.7002, 0.7186, 0.6881, 0.6693, 0.6910, 0.7445, 0.7274, 0.7056]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.7349, 0.7347, 0.7034, 0.7696, 0.7876, 0.7597, 0.7916, 0.8085, 0.8036]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = 
EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_ddpm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) # using the PNDM scheduler by default pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 
0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py", "repo_id": "diffusers", "token_count": 3918 }
143
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import shutil import unittest from collections import OrderedDict from pathlib import Path import torch from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, ControlNetModel, DiffusionPipeline, ) from diffusers.pipelines.auto_pipeline import ( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, AUTO_TEXT2IMAGE_PIPELINES_MAPPING, ) from diffusers.utils.testing_utils import slow PRETRAINED_MODEL_REPO_MAPPING = OrderedDict( [ ("stable-diffusion", "runwayml/stable-diffusion-v1-5"), ("if", "DeepFloyd/IF-I-XL-v1.0"), ("kandinsky", "kandinsky-community/kandinsky-2-1"), ("kandinsky22", "kandinsky-community/kandinsky-2-2-decoder"), ] ) class AutoPipelineFastTest(unittest.TestCase): def test_from_pipe_consistent(self): pipe = AutoPipelineForText2Image.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False ) original_config = dict(pipe.config) pipe = AutoPipelineForImage2Image.from_pipe(pipe) assert dict(pipe.config) == original_config pipe = AutoPipelineForText2Image.from_pipe(pipe) assert dict(pipe.config) == original_config def test_from_pipe_override(self): pipe = AutoPipelineForText2Image.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False ) pipe = AutoPipelineForImage2Image.from_pipe(pipe, requires_safety_checker=True) assert pipe.config.requires_safety_checker is True pipe = AutoPipelineForText2Image.from_pipe(pipe, requires_safety_checker=True) assert pipe.config.requires_safety_checker is True def test_from_pipe_consistent_sdxl(self): pipe = AutoPipelineForImage2Image.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-xl-pipe", requires_aesthetics_score=True, force_zeros_for_empty_prompt=False, ) original_config = dict(pipe.config) pipe = AutoPipelineForText2Image.from_pipe(pipe) pipe = AutoPipelineForImage2Image.from_pipe(pipe) assert dict(pipe.config) == original_config def test_kwargs_local_files_only(self): repo = "hf-internal-testing/tiny-stable-diffusion-torch" tmpdirname = DiffusionPipeline.download(repo) tmpdirname = Path(tmpdirname) # edit commit_id so that it's not the latest commit commit_id = tmpdirname.name new_commit_id = commit_id + "hug" ref_dir = tmpdirname.parent.parent / "refs/main" with open(ref_dir, "w") as f: f.write(new_commit_id) new_tmpdirname = tmpdirname.parent / new_commit_id os.rename(tmpdirname, new_tmpdirname) try: AutoPipelineForText2Image.from_pretrained(repo, local_files_only=True) except OSError: assert False, "not able to load local files" shutil.rmtree(tmpdirname.parent.parent) def test_from_pipe_controlnet_text2img(self): pipe = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=controlnet) assert
pipe.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe.components pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=None) assert pipe.__class__.__name__ == "StableDiffusionPipeline" assert "controlnet" not in pipe.components def test_from_pipe_controlnet_img2img(self): pipe = AutoPipelineForImage2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=controlnet) assert pipe.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe.components pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=None) assert pipe.__class__.__name__ == "StableDiffusionImg2ImgPipeline" assert "controlnet" not in pipe.components def test_from_pipe_controlnet_inpaint(self): pipe = AutoPipelineForInpainting.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=controlnet) assert pipe.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe.components pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=None) assert pipe.__class__.__name__ == "StableDiffusionInpaintPipeline" assert "controlnet" not in pipe.components def test_from_pipe_controlnet_new_task(self): pipe_text2img = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_text2img, controlnet=controlnet) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=None) assert pipe_inpaint.__class__.__name__ == "StableDiffusionInpaintPipeline" assert "controlnet" not in pipe_inpaint.components # testing `from_pipe` for text2img controlnet ## 1. from a different controlnet pipe, without controlnet argument pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img) assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe_control_text2img.components ## 2. from a different controlnet pipe, with controlnet argument pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img, controlnet=controlnet) assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe_control_text2img.components ## 3. from same controlnet pipeline class, with a different controlnet component pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_text2img, controlnet=controlnet) assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe_control_text2img.components # testing from_pipe for inpainting ## 1. 
from a different controlnet pipeline class pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img) assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe_control_inpaint.components ## from a different controlnet pipe, with a different controlnet pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=controlnet) assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe_control_inpaint.components ## from same controlnet pipe, with a different controlnet pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_inpaint, controlnet=controlnet) assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe_control_inpaint.components # testing from_pipe from img2img controlnet ## from a different controlnet pipe, without controlnet argument pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components # from a different controlnet pipe, with a different controlnet component pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img, controlnet=controlnet) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components # from same controlnet pipeline class, with a different controlnet pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_img2img, controlnet=controlnet) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components @slow class AutoPipelineIntegrationTest(unittest.TestCase): def test_pipe_auto(self): for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items(): # test txt2img pipe_txt2img = AutoPipelineForText2Image.from_pretrained( model_repo, variant="fp16", torch_dtype=torch.float16 ) self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForText2Image.from_pipe(pipe_txt2img) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_txt2img) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) if "kandinsky" not in model_name: pipe_to = AutoPipelineForInpainting.from_pipe(pipe_txt2img) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) del pipe_txt2img, pipe_to gc.collect() # test img2img pipe_img2img = AutoPipelineForImage2Image.from_pretrained( model_repo, variant="fp16", torch_dtype=torch.float16 ) self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForText2Image.from_pipe(pipe_img2img) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_img2img) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) if "kandinsky" not in model_name: pipe_to = AutoPipelineForInpainting.from_pipe(pipe_img2img) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) del pipe_img2img, pipe_to gc.collect() # test inpaint if "kandinsky" not in model_name: pipe_inpaint = AutoPipelineForInpainting.from_pretrained( 
model_repo, variant="fp16", torch_dtype=torch.float16 ) self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForText2Image.from_pipe(pipe_inpaint) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_inpaint) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForInpainting.from_pipe(pipe_inpaint) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) del pipe_inpaint, pipe_to gc.collect() def test_from_pipe_consistent(self): for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items(): if model_name in ["kandinsky", "kandinsky22"]: auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image] else: auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image, AutoPipelineForInpainting] # test from_pretrained for pipe_from_class in auto_pipes: pipe_from = pipe_from_class.from_pretrained(model_repo, variant="fp16", torch_dtype=torch.float16) pipe_from_config = dict(pipe_from.config) for pipe_to_class in auto_pipes: pipe_to = pipe_to_class.from_pipe(pipe_from) self.assertEqual(dict(pipe_to.config), pipe_from_config) del pipe_from, pipe_to gc.collect() def test_controlnet(self): # test from_pretrained model_repo = "runwayml/stable-diffusion-v1-5" controlnet_repo = "lllyasviel/sd-controlnet-canny" controlnet = ControlNetModel.from_pretrained(controlnet_repo, torch_dtype=torch.float16) pipe_txt2img = AutoPipelineForText2Image.from_pretrained( model_repo, controlnet=controlnet, torch_dtype=torch.float16 ) self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) pipe_img2img = AutoPipelineForImage2Image.from_pretrained( model_repo, controlnet=controlnet, torch_dtype=torch.float16 ) self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) pipe_inpaint = AutoPipelineForInpainting.from_pretrained( model_repo, controlnet=controlnet, torch_dtype=torch.float16 ) self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"]) # test from_pipe for pipe_from in [pipe_txt2img, pipe_img2img, pipe_inpaint]: pipe_to = AutoPipelineForText2Image.from_pipe(pipe_from) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) self.assertEqual(dict(pipe_to.config), dict(pipe_txt2img.config)) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_from) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) self.assertEqual(dict(pipe_to.config), dict(pipe_img2img.config)) pipe_to = AutoPipelineForInpainting.from_pipe(pipe_from) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"]) self.assertEqual(dict(pipe_to.config), dict(pipe_inpaint.config))
diffusers/tests/pipelines/test_pipelines_auto.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipelines_auto.py", "repo_id": "diffusers", "token_count": 6542 }
144
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenCombinedPipeline from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WuerstchenCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenCombinedPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "prior_guidance_scale", "decoder_guidance_scale", "negative_prompt", "num_inference_steps", "prior_num_inference_steps", "output_type", "return_dict", ] test_xformers_attention = True @property def text_embedder_hidden_size(self): return 32 @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = {"c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2} model = WuerstchenPrior(**model_kwargs) return model.eval() @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_prior_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "c_cond": self.text_embedder_hidden_size, "c_hidden": [320], "nhead": [-1], "blocks": [4], "level_config": ["CT"], "clip_embd": self.text_embedder_hidden_size, "inject_effnet": [False], } model = WuerstchenDiffNeXt(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior prior_text_encoder = self.dummy_prior_text_encoder scheduler = DDPMWuerstchenScheduler() tokenizer = self.dummy_tokenizer text_encoder = self.dummy_text_encoder decoder = self.dummy_decoder vqgan = self.dummy_vqgan components = { "tokenizer": tokenizer, "text_encoder": text_encoder, "decoder": decoder, "vqgan": vqgan, "scheduler": scheduler, "prior_prior": prior, "prior_text_encoder":
prior_text_encoder, "prior_tokenizer": tokenizer, "prior_scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_guidance_scale": 4.0, "decoder_guidance_scale": 4.0, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", "height": 128, "width": 128, } return inputs def test_wuerstchen(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) @unittest.skip(reason="flakey and float16 requires CUDA") def test_float16_inference(self): super().test_float16_inference() def test_callback_inputs(self): pass def test_callback_cfg(self): pass
diffusers/tests/pipelines/wuerstchen/test_wuerstchen_combined.py/0
{ "file_path": "diffusers/tests/pipelines/wuerstchen/test_wuerstchen_combined.py", "repo_id": "diffusers", "token_count": 3552 }
145
import torch from diffusers import EulerAncestralDiscreteScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest): scheduler_classes = (EulerAncestralDiscreteScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 152.3192) < 1e-2 assert abs(result_mean.item() - 0.1983) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 108.4439) < 1e-2 assert abs(result_mean.item() - 0.1412) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 
152.3192) < 1e-2 assert abs(result_mean.item() - 0.1983) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) t_start = self.num_inference_steps - 2 scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma # add noise noise = self.dummy_noise_deter noise = noise.to(sample.device) timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 56163.0508) < 1e-2, f"expected result sum 56163.0508, but got {result_sum}" assert abs(result_mean.item() - 73.1290) < 1e-3, f"expected result mean 73.1290, but got {result_mean}"
diffusers/tests/schedulers/test_scheduler_euler_ancestral.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_euler_ancestral.py", "repo_id": "diffusers", "token_count": 2492 }
146
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import glob import os import re import subprocess # All paths are set with the intent that you run this script from the root of the repo with the command # python utils/check_copies.py DIFFUSERS_PATH = "src/diffusers" REPO_PATH = "." def _should_continue(line, indent): return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None def find_code_in_diffusers(object_name): """Find and return the source code of `object_name`.""" parts = object_name.split(".") i = 0 # First let's find the module where our object lives. module = parts[i] while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")): i += 1 if i < len(parts): module = os.path.join(module, parts[i]) if i >= len(parts): raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.") with open( os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n", ) as f: lines = f.readlines() # Now let's find the class / func in the code! indent = "" line_index = 0 for name in parts[i + 1 :]: while ( line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(lines): raise ValueError(f"{object_name} does not match any function or class in {module}.") # We found the beginning of the class / func, now let's find the end (when the indent diminishes). start_index = line_index while line_index < len(lines) and _should_continue(lines[line_index], indent): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 code_lines = lines[start_index:line_index] return "".join(code_lines) _re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") _re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)") _re_fill_pattern = re.compile(r"<FILL\s+[^>]*>") def get_indent(code): lines = code.split("\n") idx = 0 while idx < len(lines) and len(lines[idx]) == 0: idx += 1 if idx < len(lines): return re.search(r"^(\s*)\S", lines[idx]).groups()[0] return "" def run_ruff(code): command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, _ = process.communicate(input=code.encode()) return stdout.decode() def stylify(code: str) -> str: """ Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. As `ruff` does not provide a Python API this cannot be done on the fly. Args: code (`str`): The code to format. Returns: `str`: The formatted code.
""" has_indent = len(get_indent(code)) > 0 if has_indent: code = f"class Bla:\n{code}" formatted_code = run_ruff(code) return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code def is_copy_consistent(filename, overwrite=False): """ Check if the code commented as a copy in `filename` matches the original. Return the differences or overwrites the content depending on `overwrite`. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() diffs = [] line_index = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(lines): search = _re_copy_warning.search(lines[line_index]) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. indent, object_name, replace_pattern = search.groups() theoretical_code = find_code_in_diffusers(object_name) theoretical_indent = get_indent(theoretical_code) start_index = line_index + 1 if indent == theoretical_indent else line_index + 2 indent = theoretical_indent line_index = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. should_continue = True while line_index < len(lines) and should_continue: line_index += 1 if line_index >= len(lines): break line = lines[line_index] should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 observed_code_lines = lines[start_index:line_index] observed_code = "".join(observed_code_lines) # Remove any nested `Copied from` comments to avoid circular copies theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None] theoretical_code = "\n".join(theoretical_code) # Before comparing, use the `replace_pattern` on the original code. if len(replace_pattern) > 0: patterns = replace_pattern.replace("with", "").split(",") patterns = [_re_replace_pattern.search(p) for p in patterns] for pattern in patterns: if pattern is None: continue obj1, obj2, option = pattern.groups() theoretical_code = re.sub(obj1, obj2, theoretical_code) if option.strip() == "all-casing": theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code) theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code) # stylify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line theoretical_code = stylify(lines[start_index - 1] + theoretical_code) theoretical_code = theoretical_code[len(lines[start_index - 1]) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index]) if overwrite: lines = lines[:start_index] + [theoretical_code] + lines[line_index:] line_index = start_index + 1 if overwrite and len(diffs) > 0: # Warn the user a file has been modified. 
print(f"Detected changes, rewriting {filename}.") with open(filename, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) return diffs def check_copies(overwrite: bool = False): all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True) diffs = [] for filename in all_files: new_diffs = is_copy_consistent(filename, overwrite) diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs] if not overwrite and len(diffs) > 0: diff = "\n".join(diffs) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.", ) args = parser.parse_args() check_copies(args.fix_and_overwrite)
diffusers/utils/check_copies.py/0
{ "file_path": "diffusers/utils/check_copies.py", "repo_id": "diffusers", "token_count": 3398 }
147
<jupyter_start><jupyter_text>DDIM Inversion

In this notebook we will explore **inversion**, see how it relates to sampling, and apply it to the task of editing images with Stable Diffusion.

What You Will Learn
- How DDIM sampling works
- Deterministic vs stochastic samplers
- The theory behind DDIM inversion
- Editing images with inversion

Let's get started!

Setup<jupyter_code># !pip install -q transformers diffusers accelerate

import torch
import requests
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from io import BytesIO
from tqdm.auto import tqdm
from matplotlib import pyplot as plt
from torchvision import transforms as tfms
from diffusers import StableDiffusionPipeline, DDIMScheduler

# Useful function for later
def load_image(url, size=None):
    response = requests.get(url, timeout=0.2)
    img = Image.open(BytesIO(response.content)).convert('RGB')
    if size is not None:
        img = img.resize(size)
    return img

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")<jupyter_output><empty_output><jupyter_text>Loading an existing pipeline<jupyter_code># Load a pipeline
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)

# Set up a DDIM scheduler
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# Sample an image to make sure it is all working
prompt = 'Beautiful DSLR Photograph of a penguin on the beach, golden hour'
negative_prompt = 'blurry, ugly, stock photo'
im = pipe(prompt, negative_prompt=negative_prompt).images[0]
im.resize((256, 256))  # resize for convenient viewing<jupyter_output><empty_output><jupyter_text>DDIM Sampling

At a given time $t$, the noisy image $x_t$ is some mixture of the original image ($x_0$) and some noise ($\epsilon$). Here is the formula for $x_t$ from the DDIM paper, which we'll be referring to in this section:

$$ x_t = \sqrt{\alpha_t}x_0 + \sqrt{1-\alpha_t}\epsilon $$

$\epsilon$ is some gaussian noise with unit variance.

$\alpha_t$ ('alpha') is the value which is confusingly called $\bar{\alpha}$ ('alpha_bar') in the DDPM paper (!!) and defines the noise schedule. In diffusers, these values are computed for you and stored in `scheduler.alphas_cumprod`. Confusing, I know! Let's plot these values, and remember that for the rest of this notebook we'll use DDIM's notation.<jupyter_code># Plot 'alpha' (alpha_bar in DDPM language, alphas_cumprod in diffusers for clarity)
timesteps = pipe.scheduler.timesteps.cpu()
alphas = pipe.scheduler.alphas_cumprod[timesteps]
plt.plot(timesteps, alphas, label='alpha_t')
plt.legend()<jupyter_output><empty_output><jupyter_text>Initially (timestep 0, left side of the graph) we begin with a clean image and no noise, so $\alpha_t = 1$. As we move to higher timesteps, we end up with almost all noise and $\alpha_t$ drops towards 0.

During sampling, we begin with pure noise at timestep 1000 and slowly move towards timestep 0. To calculate the next step in the sampling trajectory ($x_{t-1}$, since we're moving from high $t$ to low $t$) we predict the noise ($\epsilon_\theta(x_t)$, which is the output of our model) and use it to calculate the predicted denoised image $x_0$. Then we use this prediction to move a small distance in the 'direction pointing to $x_t$'. Finally, we can add some additional noise scaled by $\sigma_t$. So, we have an equation for how to move from $x_t$ to $x_{t-1}$, with a controllable amount of noise.
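The relevant update rule from the paper (equation 12 of the DDIM paper, written in the notation above, since the original screenshot is not reproduced here) is:

$$ x_{t-1} = \sqrt{\alpha_{t-1}}\underbrace{\left(\frac{x_t - \sqrt{1-\alpha_t}\epsilon_\theta(x_t)}{\sqrt{\alpha_t}}\right)}_{\text{predicted } x_0} + \underbrace{\sqrt{1-\alpha_{t-1}-\sigma_t^2}\cdot\epsilon_\theta(x_t)}_{\text{direction pointing to } x_t} + \underbrace{\sigma_t\epsilon_t}_{\text{random noise}} $$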
And today we're specifically interested in the case where we don't add any additional noise, giving us fully deterministic DDIM sampling. Let's see what this looks like in code:<jupyter_code># Sample function (regular DDIM)
@torch.no_grad()
def sample(prompt, start_step=0, start_latents=None,
           guidance_scale=3.5, num_inference_steps=30,
           num_images_per_prompt=1, do_classifier_free_guidance=True,
           negative_prompt='', device=device):

    # Encode prompt
    text_embeddings = pipe._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
    )

    # Set num inference steps
    pipe.scheduler.set_timesteps(num_inference_steps, device=device)

    # Create a random starting point if we don't have one already
    if start_latents is None:
        start_latents = torch.randn(1, 4, 64, 64, device=device)
        start_latents *= pipe.scheduler.init_noise_sigma

    latents = start_latents.clone()

    for i in tqdm(range(start_step, num_inference_steps)):

        t = pipe.scheduler.timesteps[i]

        # Expand the latents if we are doing classifier-free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # Perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Normally we'd rely on the scheduler to handle the update step:
        # latents = pipe.scheduler.step(noise_pred, t, latents).prev_sample

        # Instead, let's do it ourselves:
        prev_t = max(1, t.item() - (1000 // num_inference_steps))  # t-1
        alpha_t = pipe.scheduler.alphas_cumprod[t.item()]
        alpha_t_prev = pipe.scheduler.alphas_cumprod[prev_t]
        predicted_x0 = (latents - (1 - alpha_t).sqrt() * noise_pred) / alpha_t.sqrt()
        direction_pointing_to_xt = (1 - alpha_t_prev).sqrt() * noise_pred
        latents = alpha_t_prev.sqrt() * predicted_x0 + direction_pointing_to_xt

    # Post-processing
    images = pipe.decode_latents(latents)
    images = pipe.numpy_to_pil(images)

    return images

# Test our sampling function by generating an image
sample('Watercolor painting of a beach sunset', negative_prompt=negative_prompt, num_inference_steps=50)[0].resize((256, 256))<jupyter_output><empty_output><jupyter_text>See if you can match the code with the equation from the paper. Note that $\sigma=0$ since we're only interested in the no-extra-noise case, so we can leave out those bits of the equation.

Inversion

The goal of inversion is to 'reverse' the sampling process. We want to end up with a noisy latent which, if used as the starting point for our usual sampling procedure, results in the original image being generated.
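The key step, sketched here by rearranging the deterministic ($\sigma_t = 0$) update above to solve for the next, noisier latent (it relies on the assumption that the noise prediction changes little between neighbouring timesteps), is:

$$ x_{t+1} = \sqrt{\alpha_{t+1}}\left(\frac{x_t - \sqrt{1-\alpha_t}\epsilon_\theta(x_t)}{\sqrt{\alpha_t}}\right) + \sqrt{1-\alpha_{t+1}}\cdot\epsilon_\theta(x_t) $$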
Here we load an image as our initial image, but you can also generate one yourself to use instead.<jupyter_code># https://www.pexels.com/photo/a-beagle-on-green-grass-field-8306128/
input_image = load_image('https://images.pexels.com/photos/8306128/pexels-photo-8306128.jpeg', size=(512, 512))
input_image<jupyter_output><empty_output><jupyter_text>We're also going to use a prompt to do the inversion with classifier-free guidance included, so enter a description of the image:<jupyter_code>input_image_prompt = "Photograph of a puppy on the grass"<jupyter_output><empty_output><jupyter_text>Next, we need to turn this PIL image into a set of latents which we will use as the starting point for our inversion:<jupyter_code># Encode with the VAE
with torch.no_grad():
    latent = pipe.vae.encode(tfms.functional.to_tensor(input_image).unsqueeze(0).to(device) * 2 - 1)
    l = 0.18215 * latent.latent_dist.sample()<jupyter_output><empty_output><jupyter_text>Alright, time for the fun bit. This function looks similar to the sampling function above, but we move through the timesteps in the opposite direction, starting at $t=0$ and moving towards higher and higher noise. And instead of updating our latents to be less noisy, we estimate the predicted noise and use it to UNDO an update step, moving them from $t$ to $t+1$.<jupyter_code>## Inversion
@torch.no_grad()
def invert(start_latents, prompt, guidance_scale=3.5, num_inference_steps=80,
           num_images_per_prompt=1, do_classifier_free_guidance=True,
           negative_prompt='', device=device):

    # Encode prompt
    text_embeddings = pipe._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
    )

    # Latents are now the specified start latents
    latents = start_latents.clone()

    # We'll keep a list of the inverted latents as the process goes on
    intermediate_latents = []

    # Set num inference steps
    pipe.scheduler.set_timesteps(num_inference_steps, device=device)

    # Reversed timesteps <<<<<<<<<<<<<<<<<<<<
    timesteps = reversed(pipe.scheduler.timesteps)

    for i in tqdm(range(1, num_inference_steps), total=num_inference_steps - 1):

        # We'll skip the final iteration
        if i >= num_inference_steps - 1:
            continue

        t = timesteps[i]

        # Expand the latents if we are doing classifier-free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # Perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        current_t = max(0, t.item() - (1000 // num_inference_steps))  # t
        next_t = t  # min(999, t.item() + (1000 // num_inference_steps))  # t+1
        alpha_t = pipe.scheduler.alphas_cumprod[current_t]
        alpha_t_next = pipe.scheduler.alphas_cumprod[next_t]

        # Inverted update step (re-arranging the update step to get x(t) (new latents) as a function of x(t-1) (current latents))
        latents = (latents - (1 - alpha_t).sqrt() * noise_pred) * (alpha_t_next.sqrt() / alpha_t.sqrt()) + (1 - alpha_t_next).sqrt() * noise_pred

        # Store
        intermediate_latents.append(latents)

    return torch.cat(intermediate_latents)<jupyter_output><empty_output><jupyter_text>Running it on the latent representation of our puppy pic, we get back a set of all the intermediate latents created during the inversion process:<jupyter_code>inverted_latents = invert(l, input_image_prompt, num_inference_steps=50)
inverted_latents.shape<jupyter_output><empty_output><jupyter_text>We can view the final set of latents - these will hopefully be the noisy starting point for our new sampling attempts:<jupyter_code># Decode the final inverted latents
with torch.no_grad():
    im = pipe.decode_latents(inverted_latents[-1].unsqueeze(0))
pipe.numpy_to_pil(im)[0]<jupyter_output><empty_output><jupyter_text>You can pass these inverted latents to the pipeline using the normal `__call__` method:<jupyter_code>pipe(input_image_prompt, latents=inverted_latents[-1][None], num_inference_steps=50, guidance_scale=3.5).images[0]<jupyter_output><empty_output><jupyter_text>But here we see our first problem: this is **not quite the image we started with**! This is because DDIM inversion relies on a critical assumption that the noise prediction at time $t$ and at time $t+1$ will be the same, which is not true when we only invert over 50 or 100 timesteps. We could use more timesteps to hopefully get a more accurate inversion, but we can also 'cheat' and start from, say, 20/50 of the way through sampling, with the corresponding intermediate latents we saved during inversion:<jupyter_code># The reason we want to be able to specify start step
start_step = 20
sample(input_image_prompt, start_latents=inverted_latents[-(start_step + 1)][None],
       start_step=start_step, num_inference_steps=50)[0]<jupyter_output><empty_output><jupyter_text>Pretty close to our input image! Why are we doing this? Well, the hope is that if we now sample with a new prompt we'll get an image that matches the original EXCEPT in places relevant to the new prompt. For example, replacing 'puppy' with 'cat' we should see a cat with a near-identical lawn and background:<jupyter_code># Sampling with a new prompt
start_step = 10
new_prompt = input_image_prompt.replace('puppy', 'cat')
sample(new_prompt, start_latents=inverted_latents[-(start_step + 1)][None],
       start_step=start_step, num_inference_steps=50)[0]<jupyter_output><empty_output><jupyter_text>Why not just use img2img?

Why bother inverting? Can't we just add noise to the input image and denoise with the new prompt? We can, but this will result in much more drastic changes everywhere (if we add lots of noise) or not enough changes anywhere (if we add less noise). Try it yourself:<jupyter_code>start_step = 10
num_inference_steps = 50
pipe.scheduler.set_timesteps(num_inference_steps)
noisy_l = pipe.scheduler.add_noise(l, torch.randn_like(l), pipe.scheduler.timesteps[start_step])
sample(new_prompt, start_latents=noisy_l, start_step=start_step, num_inference_steps=num_inference_steps)[0]<jupyter_output><empty_output><jupyter_text>Note the much larger change to the lawn and background.
Putting it all together

Let's wrap the code we've written so far into a simple function that takes an image and two prompts and performs an edit using inversion:<jupyter_code>def edit(input_image, input_image_prompt, edit_prompt, num_steps=100, start_step=30, guidance_scale=3.5):
    with torch.no_grad():
        latent = pipe.vae.encode(tfms.functional.to_tensor(input_image).unsqueeze(0).to(device) * 2 - 1)
        l = 0.18215 * latent.latent_dist.sample()
    inverted_latents = invert(l, input_image_prompt, num_inference_steps=num_steps)
    final_im = sample(edit_prompt, start_latents=inverted_latents[-(start_step + 1)][None],
                      start_step=start_step, num_inference_steps=num_steps,
                      guidance_scale=guidance_scale)[0]
    return final_im<jupyter_output><empty_output><jupyter_text>And in action:<jupyter_code>edit(input_image, 'A puppy on the grass', 'an old grey dog on the grass', num_steps=50, start_step=10)

edit(input_image, 'A puppy on the grass', 'A blue dog on the lawn', num_steps=50, start_step=12, guidance_scale=6)

# Exercise: Try this on some more images! Explore the different parameters<jupyter_output><empty_output><jupyter_text>More Steps = Better Performance

If you're having issues with less accurate inversions, you can try using more steps (at the cost of a longer running time). To test the inversion you can use our edit function with the same prompt:<jupyter_code># Inversion test with far more steps
edit(input_image, 'A puppy on the grass', 'A puppy on the grass', num_steps=350, start_step=1)<jupyter_output><empty_output><jupyter_text>Much better! And trying it for an edit:<jupyter_code>edit(input_image, 'A photograph of a puppy', 'A photograph of a grey cat', num_steps=150, start_step=30, guidance_scale=5.5)

# source: https://www.pexels.com/photo/girl-taking-photo-1493111/
face = load_image('https://images.pexels.com/photos/1493111/pexels-photo-1493111.jpeg', size=(512, 512))
face

edit(face, 'A photograph of a face', 'A photograph of a face with sunglasses', num_steps=250, start_step=30, guidance_scale=3.5)

edit(face, 'A photograph of a face', 'Acrylic palette knife painting of a face, colorful', num_steps=250, start_step=65, guidance_scale=5.5)<jupyter_output><empty_output>
diffusion-models-class/units/en/unit4/ddim_inversion.ipynb/0
{ "file_path": "diffusion-models-class/units/en/unit4/ddim_inversion.ipynb", "repo_id": "diffusion-models-class", "token_count": 5328 }
148
# Class-conditioned diffusion model

<CourseFloatingBanner unit={2}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "Class-conditioned diffusion model", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/fr/unit3/class_conditioned_diffusion_model_example.ipynb"},
    {label: "Class-conditioned diffusion model", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/fr/unit3/class_conditioned_diffusion_model_example.ipynb"},
]} />

In this notebook, we'll illustrate one way to add conditioning information to a diffusion model. Specifically, we'll train a class-conditioned diffusion model on MNIST, following on from the training-from-scratch example in Unit 1, where we can specify which digit we want the model to generate at inference time.

As mentioned in the introduction to this unit, this is just one of many ways to add extra conditioning information to a diffusion model, chosen here for its relative simplicity. Just like the Unit 1 notebook, this notebook is only for illustrative purposes and you can skip it if you'd like.

# Setup and data preparation

```py
!pip install -q diffusers
```

```py
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from diffusers import DDPMScheduler, UNet2DModel
from matplotlib import pyplot as plt
from tqdm.auto import tqdm

device = 'mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using device: {device}')
```

```py
# Load the dataset
dataset = torchvision.datasets.MNIST(root="mnist/", train=True, download=True, transform=torchvision.transforms.ToTensor())

# Feed it into a dataloader (batch size 8 here just for demo purposes)
train_dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

# View some examples
x, y = next(iter(train_dataloader))
print('Input shape:', x.shape)
print('Labels:', y)
plt.imshow(torchvision.utils.make_grid(x)[0], cmap='Greys')
```

```py
Input shape: torch.Size([8, 1, 28, 28])
Labels: tensor([8, 1, 5, 9, 7, 6, 2, 2])
```

## Creating a class-conditioned UNet

The way we'll feed in the class conditioning is as follows:
- Create a standard `UNet2DModel` with some extra input channels.
- Map the class label to a learned vector of shape (`class_emb_size`) via an embedding layer.
- Concatenate this information as extra channels for the internal input of the UNet with `net_input = torch.cat((x, class_cond), 1)`.
- Feed this `net_input` (which has (`class_emb_size+1`) channels in total) into the UNet to get the final prediction.
In this example, we've set `class_emb_size` to 4, but this is completely arbitrary and you could explore setting it to 1 (to see if it still works), to 10 (to match the number of classes), or replacing the learned `nn.Embedding` with a simple one-hot encoding of the class label directly (a sketch of this variant appears at the end of this page).

Here's what the implementation looks like:

```py
class ClassConditionedUNet(nn.Module):
    def __init__(self, num_classes=10, class_emb_size=4):
        super().__init__()

        # The embedding layer maps the class label to a vector of size `class_emb_size`
        self.class_emb = nn.Embedding(num_classes, class_emb_size)

        # self.model is an unconditional UNet with extra input channels to accept the conditioning information (the class embedding)
        self.model = UNet2DModel(
            sample_size=28,                 # the target image resolution
            in_channels=1 + class_emb_size, # extra input channels for the class conditioning
            out_channels=1,                 # the number of output channels
            layers_per_block=2,             # how many ResNet layers to use per UNet block
            block_out_channels=(32, 64, 64),
            down_block_types=(
                "DownBlock2D",              # a regular ResNet downsampling block
                "AttnDownBlock2D",          # a ResNet downsampling block with spatial self-attention
                "AttnDownBlock2D",
            ),
            up_block_types=(
                "AttnUpBlock2D",
                "AttnUpBlock2D",            # a ResNet upsampling block with spatial self-attention
                "UpBlock2D",                # a regular ResNet upsampling block
            ),
        )

    # Our forward method now takes the class labels as an additional argument
    def forward(self, x, t, class_labels):
        # Shape of x:
        bs, ch, w, h = x.shape

        # Class conditioning in the right shape to add as extra input channels
        class_cond = self.class_emb(class_labels)  # Map to embedding dimension
        class_cond = class_cond.view(bs, class_cond.shape[1], 1, 1).expand(bs, class_cond.shape[1], w, h)
        # x is shape (bs, 1, 28, 28) and class_cond is now (bs, 4, 28, 28)

        # Net input is now x and the class cond concatenated together along dimension 1
        net_input = torch.cat((x, class_cond), 1)  # (bs, 5, 28, 28)

        # Feed this to the UNet along with the timestep and return the prediction
        return self.model(net_input, t).sample  # (bs, 1, 28, 28)
```

If any of the shapes or transforms are confusing, add in some `print()` statements to show the relevant shapes and check that they match your expectations. We've also annotated the shapes of some intermediate variables in the hope of making things clearer.

## Training and sampling

Whereas before we'd do something like `prediction = UNet(x, t)`, we'll now add the correct labels as a third argument (`prediction = UNet(x, t, y)`) during training, and at inference time we can pass whatever labels we want and, if all goes well, the model should generate images that match. $y$ in this case is the label of the MNIST digits, with values from 0 to 9.

The training loop is very similar to the Unit 1 example. We're now predicting the noise (rather than the denoised image, as in Unit 1) to match the objective expected by the default `DDPMScheduler`, which we use to add noise during training and to generate samples at inference time. Training takes a while.
Speeding it up could be a fun mini-project, but most of you can probably just read through the code (and in fact this whole notebook) without running it, since we're only illustrating an idea.

```py
# Create a scheduler
noise_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2')
```

```py
# Redefine the dataloader to set the batch size higher than the demo of 8
train_dataloader = DataLoader(dataset, batch_size=128, shuffle=True)

# How many runs through the data should we do?
n_epochs = 10

# Our network
net = ClassConditionedUNet().to(device)

# Our loss function
loss_fn = nn.MSELoss()

# The optimizer
opt = torch.optim.Adam(net.parameters(), lr=1e-3)

# Keeping a record of the losses for later viewing
losses = []

# The training loop
for epoch in range(n_epochs):
    for x, y in tqdm(train_dataloader):

        # Get some data and prepare the corrupted version
        x = x.to(device) * 2 - 1  # Data on the GPU (mapped to (-1, 1))
        y = y.to(device)
        noise = torch.randn_like(x)
        timesteps = torch.randint(0, 999, (x.shape[0],)).long().to(device)
        noisy_x = noise_scheduler.add_noise(x, noise, timesteps)

        # Get the model prediction
        pred = net(noisy_x, timesteps, y)  # Note that we pass in the labels y

        # Calculate the loss
        loss = loss_fn(pred, noise)  # How close is the output to the noise?

        # Backprop and update the params
        opt.zero_grad()
        loss.backward()
        opt.step()

        # Store the loss for later
        losses.append(loss.item())

    # Print out the average of the last 100 loss values to get an idea of progress
    avg_loss = sum(losses[-100:]) / 100
    print(f'Finished epoch {epoch}. Average of the last 100 loss values: {avg_loss:05f}')

# View the loss curve
plt.plot(losses)
```

Once training finishes, we can sample some images by feeding in different labels as our conditioning:

```py
# Prepare a random x to start from, plus the desired labels y
x = torch.randn(80, 1, 28, 28).to(device)
y = torch.tensor([[i] * 8 for i in range(10)]).flatten().to(device)

# Sampling loop
for i, t in tqdm(enumerate(noise_scheduler.timesteps)):

    # Get the model prediction
    with torch.no_grad():
        residual = net(x, t, y)  # Again, note that we pass in our labels y

    # Update the sample with the step
    x = noise_scheduler.step(residual, t, x).prev_sample

# Show the results
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
ax.imshow(torchvision.utils.make_grid(x.detach().cpu().clip(-1, 1), nrow=8)[0], cmap='Greys')
```

There we go! We can now control which images get produced. We hope you've enjoyed this example. As always, feel free to ask questions on Discord.

<Tip>
✏️ *Your turn!* Try recreating the same thing with FashionMNIST. Tweak the learning rate, the batch size and the number of epochs. Can you get decent-looking fashion images with less training time than the example above?
</Tip>
diffusion-models-class/units/fr/unit2/3.mdx/0
{ "file_path": "diffusion-models-class/units/fr/unit2/3.mdx", "repo_id": "diffusion-models-class", "token_count": 4505 }
149
# DDIM Inversion (Denoising Diffusion Implicit Models)

<CourseFloatingBanner unit={4}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "DDIM Inversion", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/fr/unit4/ddim_inversion.ipynb"},
    {label: "DDIM Inversion", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/fr/unit4/ddim_inversion.ipynb"},
]} />

In this notebook, we'll explore inversion, see how it relates to sampling, and apply it to the task of editing images with Stable Diffusion.

What you will learn:
- How DDIM sampling works
- Deterministic vs stochastic samplers
- The theory behind DDIM inversion
- Editing images with inversion

Let's get started!

# Setup

```py
# !pip install -q transformers diffusers accelerate
```

```py
import torch
import requests
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from io import BytesIO
from tqdm.auto import tqdm
from matplotlib import pyplot as plt
from torchvision import transforms as tfms
from diffusers import StableDiffusionPipeline, DDIMScheduler

# A useful function for later
def load_image(url, size=None):
    response = requests.get(url, timeout=0.2)
    img = Image.open(BytesIO(response.content)).convert('RGB')
    if size is not None:
        img = img.resize(size)
    return img
```

```py
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```

## Loading an existing pipeline

```py
# Load a pipeline
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)
```

```py
# Set up a DDIM scheduler
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
```

```py
# Sample an image to make sure everything is working
prompt = 'Beautiful DSLR Photograph of a penguin on the beach, golden hour'
negative_prompt = 'blurry, ugly, stock photo'
im = pipe(prompt, negative_prompt=negative_prompt).images[0]
im.resize((256, 256))  # Resize for easier viewing
```

## DDIM Sampling

At a given time $t$, the noisy image $x_t$ is a mixture of the original image ($x_0$) and some noise ($\epsilon$). Here is the formula for $x_t$ from the DDIM paper, which we'll refer to in this section:

$$ x_t = \sqrt{\alpha_t}x_0 + \sqrt{1-\alpha_t}\epsilon $$

$\epsilon$ is gaussian noise with unit variance.

$\alpha_t$ ('alpha') is the value which is confusingly called $\bar{\alpha}$ ('alpha_bar') in the DDPM paper (!!) and defines the noise schedule. In 🤗 *Diffusers*, these values are computed for you and stored in `scheduler.alphas_cumprod`. Confusing, I know! Let's plot these values, and remember that for the rest of this notebook we'll use DDIM's notation.
```py
# Plot 'alpha' (alpha_bar in DDPM, alphas_cumprod in Diffusers)
timesteps = pipe.scheduler.timesteps.cpu()
alphas = pipe.scheduler.alphas_cumprod[timesteps]
plt.plot(timesteps, alphas, label='alpha_t')
plt.legend()
```

Initially (step 0, left side of the graph), we begin with a clean image and no noise, so $\alpha_t = 1$. As we move to higher timesteps, we end up with almost all noise and $\alpha_t$ drops towards 0.

During sampling, we start with pure noise at timestep $1000$ and slowly move towards timestep $0$. To compute the next step in the sampling trajectory ($x_{t-1}$, since we're moving from high $t$ to low $t$), we predict the noise ($\epsilon_\theta(x_t)$, the output of our model) and use it to compute the predicted denoised image $x_0$. We then use this prediction to move a small distance in the 'direction pointing to $x_t$'. Finally, we can add some extra noise scaled by $\sigma_t$.

So we have an equation for moving from $x_t$ to $x_{t-1}$, with a controllable amount of noise. In our case here, we're specifically interested in the setting where we add no extra noise at all, which gives us fully deterministic DDIM sampling.
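For reference, the update rule in question (equation 12 of the DDIM paper, written in the notation above) is:

$$ x_{t-1} = \sqrt{\alpha_{t-1}}\left(\frac{x_t - \sqrt{1-\alpha_t}\epsilon_\theta(x_t)}{\sqrt{\alpha_t}}\right) + \sqrt{1-\alpha_{t-1}-\sigma_t^2}\cdot\epsilon_\theta(x_t) + \sigma_t\epsilon_t $$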
Let's see what that looks like in code:

```py
# Sample function (regular DDIM)
@torch.no_grad()
def sample(prompt, start_step=0, start_latents=None,
           guidance_scale=3.5, num_inference_steps=30,
           num_images_per_prompt=1, do_classifier_free_guidance=True,
           negative_prompt='', device=device):

    # Encode the prompt
    text_embeddings = pipe._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
    )

    # Set the number of inference steps
    pipe.scheduler.set_timesteps(num_inference_steps, device=device)

    # Create a random starting point if we don't already have one
    if start_latents is None:
        start_latents = torch.randn(1, 4, 64, 64, device=device)
        start_latents *= pipe.scheduler.init_noise_sigma

    latents = start_latents.clone()

    for i in tqdm(range(start_step, num_inference_steps)):

        t = pipe.scheduler.timesteps[i]

        # Expand the latents if we are doing classifier-free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # Perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Normally we'd rely on the scheduler to handle the update step:
        # latents = pipe.scheduler.step(noise_pred, t, latents).prev_sample

        # Instead, let's do it ourselves:
        prev_t = max(1, t.item() - (1000 // num_inference_steps))  # t-1
        alpha_t = pipe.scheduler.alphas_cumprod[t.item()]
        alpha_t_prev = pipe.scheduler.alphas_cumprod[prev_t]
        predicted_x0 = (latents - (1 - alpha_t).sqrt() * noise_pred) / alpha_t.sqrt()
        direction_pointing_to_xt = (1 - alpha_t_prev).sqrt() * noise_pred
        latents = alpha_t_prev.sqrt() * predicted_x0 + direction_pointing_to_xt

    # Post-processing
    images = pipe.decode_latents(latents)
    images = pipe.numpy_to_pil(images)

    return images
```

```py
# Test our sampling function by generating an image
sample('Watercolor painting of a beach sunset', negative_prompt=negative_prompt, num_inference_steps=50)[0].resize((256, 256))
```

See if you can match the code with the equation from the paper. Note that $\sigma=0$ since we're only interested in the case with no extra noise, so we can leave those parts of the equation out.

## Inversion

The goal of inversion is to 'reverse' the sampling process. We want to end up with a noisy latent which, if used as the starting point for our usual sampling procedure, generates the original image.

Here we load an image as our initial image, but you can also generate one yourself to use instead.
```py
# https://www.pexels.com/photo/a-beagle-on-green-grass-field-8306128/
input_image = load_image('https://images.pexels.com/photos/8306128/pexels-photo-8306128.jpeg', size=(512, 512))
input_image
```

We're also going to use a prompt to do the inversion with classifier-free guidance included, so enter a description of the image:

```py
input_image_prompt = "Photograph of a puppy on the grass"
```

Next, we need to turn this PIL image into a set of latents that we'll use as the starting point for our inversion:

```py
# Encode with the VAE
with torch.no_grad():
    latent = pipe.vae.encode(tfms.functional.to_tensor(input_image).unsqueeze(0).to(device) * 2 - 1)
    l = 0.18215 * latent.latent_dist.sample()
```

Alright, time for the fun part. This function looks similar to the sampling function above, but we move through the timesteps in the opposite direction, starting at $t=0$ and moving towards higher and higher noise. And instead of updating our latents to be less noisy, we estimate the predicted noise and use it to UNDO an update step, moving them from $t$ to $t+1$.
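Rearranging the deterministic ($\sigma_t = 0$) update rule to solve for the next, noisier latent gives the step the code below implements; a sketch, relying on the assumption that the noise prediction changes little between neighbouring timesteps:

$$ x_{t+1} = \sqrt{\alpha_{t+1}}\left(\frac{x_t - \sqrt{1-\alpha_t}\epsilon_\theta(x_t)}{\sqrt{\alpha_t}}\right) + \sqrt{1-\alpha_{t+1}}\cdot\epsilon_\theta(x_t) $$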
```py
## Inversion
@torch.no_grad()
def invert(start_latents, prompt, guidance_scale=3.5, num_inference_steps=80,
           num_images_per_prompt=1, do_classifier_free_guidance=True,
           negative_prompt='', device=device):

    # Encode the prompt
    text_embeddings = pipe._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
    )

    # The latents are now the specified start latents
    latents = start_latents.clone()

    # We'll keep a list of the inverted latents as the process goes on
    intermediate_latents = []

    # Set the number of inference steps
    pipe.scheduler.set_timesteps(num_inference_steps, device=device)

    # Reversed timesteps <<<<<<<<<<<<<<<<<<<<
    timesteps = reversed(pipe.scheduler.timesteps)

    for i in tqdm(range(1, num_inference_steps), total=num_inference_steps - 1):

        # We'll skip the final iteration
        if i >= num_inference_steps - 1:
            continue

        t = timesteps[i]

        # Expand the latents if we are doing classifier-free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # Perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        current_t = max(0, t.item() - (1000 // num_inference_steps))  # t
        next_t = t  # min(999, t.item() + (1000 // num_inference_steps))  # t+1
        alpha_t = pipe.scheduler.alphas_cumprod[current_t]
        alpha_t_next = pipe.scheduler.alphas_cumprod[next_t]

        # Inverted update step (re-arranging the update step to get x(t) (new latents) as a function of x(t-1) (current latents))
        latents = (latents - (1 - alpha_t).sqrt() * noise_pred) * (alpha_t_next.sqrt() / alpha_t.sqrt()) + (1 - alpha_t_next).sqrt() * noise_pred

        # Store
        intermediate_latents.append(latents)

    return torch.cat(intermediate_latents)
```

Running it on the latent representation of our puppy picture, we get back a set of all the intermediate latents created during the inversion process:

```py
inverted_latents = invert(l, input_image_prompt, num_inference_steps=50)
inverted_latents.shape
```

```py
torch.Size([48, 4, 64, 64])
```

We can view the final set of latents; these will hopefully be the noisy starting point for our new sampling attempts:

```py
# Decode the final inverted latents
with torch.no_grad():
    im = pipe.decode_latents(inverted_latents[-1].unsqueeze(0))
pipe.numpy_to_pil(im)[0]
```

You can pass these inverted latents to the pipeline using the normal `__call__` method:

```py
pipe(input_image_prompt, latents=inverted_latents[-1][None], num_inference_steps=50, guidance_scale=3.5).images[0]
```

But here we see our first problem: this is not quite the image we started with! That's because DDIM inversion relies on a critical assumption that the noise prediction at time $t$ and at time $t+1$ will be the same, which is not true when we only invert over $50$ or $100$ timesteps. We could use more timesteps to hopefully get a more accurate inversion, but we can also 'cheat' and start from, say, $20/50$ of the way through sampling, with the corresponding intermediate latents we saved during inversion:

```py
# The reason we want to be able to specify the start step
start_step = 20
sample(input_image_prompt, start_latents=inverted_latents[-(start_step + 1)][None],
       start_step=start_step, num_inference_steps=50)[0]
```

Very close to our input image! Why are we doing this? Well, the hope is that if we now sample with a new prompt, we'll get an image that matches the original EXCEPT in the places relevant to the new prompt. For example, replacing "puppy" with "cat", we should see a cat with a near-identical lawn and background:

```py
# Sampling with a new prompt
start_step = 10
new_prompt = input_image_prompt.replace('puppy', 'cat')
sample(new_prompt, start_latents=inverted_latents[-(start_step + 1)][None],
       start_step=start_step, num_inference_steps=50)[0]
```

### Why not just use img2img?

Why bother inverting? Can't we just add noise to the input image and denoise it with the new prompt? We can, but this will result in much more drastic changes everywhere (if we add lots of noise) or not enough change anywhere (if we add less noise).
Try it yourself:

```py
start_step = 10
num_inference_steps = 50
pipe.scheduler.set_timesteps(num_inference_steps)
noisy_l = pipe.scheduler.add_noise(l, torch.randn_like(l), pipe.scheduler.timesteps[start_step])
sample(new_prompt, start_latents=noisy_l, start_step=start_step, num_inference_steps=num_inference_steps)[0]
```

Note the much larger change to the lawn and background.

## Putting it all together

Let's wrap the code we've written so far into a simple function that takes an image and two prompts and performs an edit using inversion:

```py
def edit(input_image, input_image_prompt, edit_prompt, num_steps=100, start_step=30, guidance_scale=3.5):
    with torch.no_grad():
        latent = pipe.vae.encode(tfms.functional.to_tensor(input_image).unsqueeze(0).to(device) * 2 - 1)
        l = 0.18215 * latent.latent_dist.sample()
    inverted_latents = invert(l, input_image_prompt, num_inference_steps=num_steps)
    final_im = sample(edit_prompt, start_latents=inverted_latents[-(start_step + 1)][None],
                      start_step=start_step, num_inference_steps=num_steps,
                      guidance_scale=guidance_scale)[0]
    return final_im
```

And in action:

```py
edit(input_image, 'A puppy on the grass', 'an old grey dog on the grass', num_steps=50, start_step=10)
```

```py
edit(input_image, 'A puppy on the grass', 'A blue dog on the lawn', num_steps=50, start_step=12, guidance_scale=6)
```

<Tip>
✏️ *Your turn!* Try this on some more images! Explore the different parameters.
</Tip>

## More steps = better performance

If you're having issues with less accurate inversions, you can try using more steps (at the cost of a longer running time).
To test the inversion, you can use our edit function with the same prompt:

```py
# Inversion test with far more steps
edit(input_image, 'A puppy on the grass', 'A puppy on the grass', num_steps=350, start_step=1)
```

Much better! And trying it for an edit:

```py
edit(input_image, 'A photograph of a puppy', 'A photograph of a grey cat', num_steps=150, start_step=30, guidance_scale=5.5)
```

```py
# source: https://www.pexels.com/photo/girl-taking-photo-1493111/
face = load_image('https://images.pexels.com/photos/1493111/pexels-photo-1493111.jpeg', size=(512, 512))
face
```

```py
edit(face, 'A photograph of a face', 'A photograph of a face with sunglasses', num_steps=250, start_step=30, guidance_scale=3.5)
```

```py
edit(face, 'A photograph of a face', 'Acrylic palette knife painting of a face, colorful', num_steps=250, start_step=65, guidance_scale=5.5)
```

## What's next?

Armed with the knowledge from this notebook, we recommend looking into [Null-text Inversion](https://null-text-inversion.github.io/), which builds on DDIM by optimizing the null text (unconditional prompt) during inversion for more accurate inversions and better edits.
diffusion-models-class/units/fr/unit4/2.mdx/0
{ "file_path": "diffusion-models-class/units/fr/unit4/2.mdx", "repo_id": "diffusion-models-class", "token_count": 9611 }
150
.PHONY: doc-notebooks

doc-notebooks:
	python utils/convert_doc_to_notebooks.py
notebooks/Makefile/0
{ "file_path": "notebooks/Makefile", "repo_id": "notebooks", "token_count": 33 }
151